--- /dev/null
+Subject: Update embedded BitMagic to version 6.4.0 (from 6.0.0).
+Description: Notably address new "inlining failed in call to
+ 'always_inline'" errors on mips*el. (The bmagic package currently has
+ version 6.3.0, slightly predating the necessary fix.)
+Author: Aaron M. Ucko <ucko@debian.org>
+Forwarded: yes
+Last-Updated: 2020-09-20
+Index: c++/include/util/bitset/bmsparsevec_compr.h
+===================================================================
+--- a/c++/include/util/bitset/bmsparsevec_compr.h (revision 90103)
++++ b/c++/include/util/bitset/bmsparsevec_compr.h (revision 90104)
+@@ -98,19 +98,121 @@
+ class reference
+ {
+ public:
+- reference(rsc_sparse_vector<Val, SV>& csv, size_type idx) BMNOEXEPT
++ reference(rsc_sparse_vector<Val, SV>& csv, size_type idx) BMNOEXCEPT
+ : csv_(csv), idx_(idx)
+ {}
+- operator value_type() const { return csv_.get(idx_); }
+- bool operator==(const reference& ref) const
++ operator value_type() const BMNOEXCEPT { return csv_.get(idx_); }
++ bool operator==(const reference& ref) const BMNOEXCEPT
+ { return bool(*this) == bool(ref); }
+- bool is_null() const { return csv_.is_null(idx_); }
++ bool is_null() const BMNOEXCEPT { return csv_.is_null(idx_); }
+ private:
+ rsc_sparse_vector<Val, SV>& csv_;
+ size_type idx_;
+ };
+
++ /**
++ Const iterator to traverse the rsc sparse vector.
+
++ Implementation uses buffer for decoding so, competing changes
++ to the original vector may not match the iterator returned values.
++
++ This iterator keeps an operational buffer, memory footprint is not
++ negligable
++
++ @ingroup sv
++ */
++ class const_iterator
++ {
++ public:
++ friend class rsc_sparse_vector;
++
++#ifndef BM_NO_STL
++ typedef std::input_iterator_tag iterator_category;
++#endif
++ typedef rsc_sparse_vector<Val, SV> rsc_sparse_vector_type;
++ typedef rsc_sparse_vector_type* rsc_sparse_vector_type_ptr;
++ typedef typename rsc_sparse_vector_type::value_type value_type;
++ typedef typename rsc_sparse_vector_type::size_type size_type;
++ typedef typename rsc_sparse_vector_type::bvector_type bvector_type;
++ typedef typename bvector_type::allocator_type allocator_type;
++ typedef typename
++ bvector_type::allocator_type::allocator_pool_type allocator_pool_type;
++ typedef bm::byte_buffer<allocator_type> buffer_type;
++
++ typedef unsigned difference_type;
++ typedef unsigned* pointer;
++ typedef value_type& reference;
++
++ public:
++ const_iterator() BMNOEXCEPT;
++ const_iterator(const rsc_sparse_vector_type* csv) BMNOEXCEPT;
++ const_iterator(const rsc_sparse_vector_type* csv, size_type pos) BMNOEXCEPT;
++ const_iterator(const const_iterator& it) BMNOEXCEPT;
++
++ bool operator==(const const_iterator& it) const BMNOEXCEPT
++ { return (pos_ == it.pos_) && (csv_ == it.csv_); }
++ bool operator!=(const const_iterator& it) const BMNOEXCEPT
++ { return ! operator==(it); }
++ bool operator < (const const_iterator& it) const BMNOEXCEPT
++ { return pos_ < it.pos_; }
++ bool operator <= (const const_iterator& it) const BMNOEXCEPT
++ { return pos_ <= it.pos_; }
++ bool operator > (const const_iterator& it) const BMNOEXCEPT
++ { return pos_ > it.pos_; }
++ bool operator >= (const const_iterator& it) const BMNOEXCEPT
++ { return pos_ >= it.pos_; }
++
++ /// \brief Get current position (value)
++ value_type operator*() const { return this->value(); }
++
++
++ /// \brief Advance to the next available value
++ const_iterator& operator++() BMNOEXCEPT { this->advance(); return *this; }
++
++ /// \brief Advance to the next available value
++ const_iterator& operator++(int)
++ { const_iterator tmp(*this);this->advance(); return tmp; }
++
++
++ /// \brief Get current position (value)
++ value_type value() const;
++
++ /// \brief Get NULL status
++ bool is_null() const BMNOEXCEPT;
++
++ /// Returns true if iterator is at a valid position
++ bool valid() const BMNOEXCEPT { return pos_ != bm::id_max; }
++
++ /// Invalidate current iterator
++ void invalidate() BMNOEXCEPT { pos_ = bm::id_max; }
++
++ /// Current position (index) in the vector
++ size_type pos() const BMNOEXCEPT{ return pos_; }
++
++ /// re-position to a specified position
++ void go_to(size_type pos) BMNOEXCEPT;
++
++ /// advance iterator forward by one
++ /// @return true if it is still valid
++ bool advance() BMNOEXCEPT;
++
++ void skip_zero_values() BMNOEXCEPT;
++ private:
++ enum buf_size_e
++ {
++ n_buf_size = 1024 * 8
++ };
++
++ private:
++ const rsc_sparse_vector_type* csv_; ///!< ptr to parent
++ size_type pos_; ///!< Position
++ mutable buffer_type vbuffer_; ///!< value buffer
++ mutable buffer_type tbuffer_; ///!< temp buffer
++ mutable value_type* buf_ptr_; ///!< position in the buffer
++ };
++
++
++
+ /**
+ Back insert iterator implements buffered insert, faster than generic
+ access assignment.
+@@ -141,8 +243,8 @@
+ typedef void reference;
+
+ public:
+- back_insert_iterator();
+- back_insert_iterator(rsc_sparse_vector_type* csv);
++ back_insert_iterator() BMNOEXCEPT;
++ back_insert_iterator(rsc_sparse_vector_type* csv) BMNOEXCEPT;
+
+ back_insert_iterator& operator=(const back_insert_iterator& bi)
+ {
+@@ -154,7 +256,8 @@
+ ~back_insert_iterator();
+
+ /** push value to the vector */
+- back_insert_iterator& operator=(value_type v) { this->add(v); return *this; }
++ back_insert_iterator& operator=(value_type v)
++ { this->add(v); return *this; }
+ /** noop */
+ back_insert_iterator& operator*() { return *this; }
+ /** noop */
+@@ -166,10 +269,10 @@
+ void add(value_type v);
+
+ /** add NULL (no-value) to the container */
+- void add_null();
++ void add_null() BMNOEXCEPT;
+
+ /** add a series of consequitve NULLs (no-value) to the container */
+- void add_null(size_type count);
++ void add_null(size_type count) BMNOEXCEPT;
+
+ /** flush the accumulated buffer */
+ void flush();
+@@ -183,7 +286,8 @@
+ ///size_type add_value(value_type v);
+
+ typedef rsc_sparse_vector_type::sparse_vector_type sparse_vector_type;
+- typedef typename sparse_vector_type::back_insert_iterator sparse_vector_bi;
++ typedef
++ typename sparse_vector_type::back_insert_iterator sparse_vector_bi;
+ private:
+ rsc_sparse_vector_type* csv_; ///!< pointer on the parent vector
+ sparse_vector_bi sv_bi_;
+@@ -192,6 +296,7 @@
+ public:
+ // ------------------------------------------------------------
+ /*! @name Construction and assignment */
++
+ //@{
+
+ rsc_sparse_vector(bm::null_support null_able = bm::use_null,
+@@ -198,6 +303,18 @@
+ allocation_policy_type ap = allocation_policy_type(),
+ size_type bv_max_size = bm::id_max,
+ const allocator_type& alloc = allocator_type());
++
++ /**
++ Contructor to pre-initialize the list of assigned (not NULL) elements.
++
++ If the list of not NULL elements is known upfront it can help to
++ pre-declare it, enable rank-select index and then use set function.
++ This scenario gives significant speed boost, comparing random assignment
++
++ @param bv_null - not NULL vector for the container
++ */
++ rsc_sparse_vector(const bvector_type& bv_null);
++
+ ~rsc_sparse_vector();
+
+ /*! copy-ctor */
+@@ -205,7 +322,7 @@
+
+
+ /*! copy assignmment operator */
+- rsc_sparse_vector<Val,SV>& operator = (const rsc_sparse_vector<Val, SV>& csv)
++ rsc_sparse_vector<Val,SV>& operator=(const rsc_sparse_vector<Val, SV>& csv)
+ {
+ if (this != &csv)
+ {
+@@ -219,13 +336,13 @@
+ }
+ return *this;
+ }
+-
++
+ #ifndef BM_NO_CXX11
+ /*! move-ctor */
+- rsc_sparse_vector(rsc_sparse_vector<Val,SV>&& csv) BMNOEXEPT;
++ rsc_sparse_vector(rsc_sparse_vector<Val,SV>&& csv) BMNOEXCEPT;
+
+ /*! move assignmment operator */
+- rsc_sparse_vector<Val,SV>& operator=(rsc_sparse_vector<Val,SV>&& csv) BMNOEXEPT
++ rsc_sparse_vector<Val,SV>& operator=(rsc_sparse_vector<Val,SV>&& csv) BMNOEXCEPT
+ {
+ if (this != &csv)
+ {
+@@ -249,7 +366,7 @@
+ /*! \brief return size of the vector
+ \return size of sparse vector
+ */
+- size_type size() const;
++ size_type size() const BMNOEXCEPT;
+
+ /*! \brief return true if vector is empty
+ \return true if empty
+@@ -281,7 +398,7 @@
+ \param idx - element index
+ \return value of the element
+ */
+- value_type get(size_type idx) const;
++ value_type get(size_type idx) const BMNOEXCEPT;
+
+ /*!
+ \brief set specified element with bounds checking and automatic resize
+@@ -301,7 +418,30 @@
+ */
+ void set(size_type idx, value_type v);
+
++
+ /*!
++ \brief increment specified element by one
++ \param idx - element index
++ */
++ void inc(size_type idx);
++
++ /*!
++ \brief increment specified element by one
++ \param idx - element index
++ \param v - increment value
++ */
++ void inc(size_type idx, value_type v);
++
++ /*!
++ \brief increment specified element by one, element MUST be NOT NULL
++ Faster than just inc() if element is NULL - behavior is undefined
++ \param idx - element index
++ \param v - increment value
++ @sa inc
++ */
++ void inc_not_null(size_type idx, value_type v);
++
++ /*!
+ \brief set specified element to NULL
+ RSC vector actually erases element when it is set to NULL (expensive).
+ \param idx - element index
+@@ -309,18 +449,17 @@
+ void set_null(size_type idx);
+
+
+-
+ /** \brief test if specified element is NULL
+ \param idx - element index
+ \return true if it is NULL false if it was assigned or container
+ is not configured to support assignment flags
+ */
+- bool is_null(size_type idx) const;
++ bool is_null(size_type idx) const BMNOEXCEPT;
+
+ /**
+ \brief Get bit-vector of assigned values (or NULL)
+ */
+- const bvector_type* get_null_bvector() const;
++ const bvector_type* get_null_bvector() const BMNOEXCEPT;
+
+ /**
+ \brief find position of compressed element by its rank
+@@ -327,7 +466,7 @@
+ \param rank - rank (virtual index in sparse vector)
+ \param idx - index (true position)
+ */
+- bool find_rank(size_type rank, size_type& idx) const;
++ bool find_rank(size_type rank, size_type& idx) const BMNOEXCEPT;
+
+ //@}
+
+@@ -334,12 +473,43 @@
+ // ------------------------------------------------------------
+ /*! @name Export content to C-stype array */
+ ///@{
+-
++
++ /**
++ \brief C-style decode
++ \param arr - decode target array (must be properly sized)
++ \param idx_from - start address to decode
++ \param size - number of elements to decode
++ \param zero_mem - flag if array needs to beset to zeros first
++
++ @return actual decoded size
++ @sa decode_buf
++ */
+ size_type decode(value_type* arr,
+ size_type idx_from,
+ size_type size,
+ bool zero_mem = true) const;
+
++
++ /**
++ \brief C-style decode (variant with external memory)
++ Analog of decode, but requires two arrays.
++ Faster than decode in many cases.
++
++ \param arr - decode target array (must be properly sized)
++ \param arr_buf_tmp - decode temp bufer (must be same size of arr)
++ \param idx_from - start address to decode
++ \param size - number of elements to decode
++ \param zero_mem - flag if array needs to beset to zeros first
++
++ @return actual decoded size
++ @sa decode
++ */
++ size_type decode_buf(value_type* arr,
++ value_type* arr_buf_tmp,
++ size_type idx_from,
++ size_type size,
++ bool zero_mem = true) const BMNOEXCEPT;
++
+ ///@}
+
+
+@@ -367,7 +537,7 @@
+ \brief check if another vector has the same content
+ \return true, if it is the same
+ */
+- bool equal(const rsc_sparse_vector<Val, SV>& csv) const;
++ bool equal(const rsc_sparse_vector<Val, SV>& csv) const BMNOEXCEPT;
+ //@}
+
+
+@@ -395,6 +565,20 @@
+ /*! @name Iterator access */
+ //@{
+
++ /** Provide const iterator access to container content */
++ const_iterator begin() const BMNOEXCEPT
++ { return const_iterator(this); }
++
++ /** Provide const iterator access to the end */
++ const_iterator end() const BMNOEXCEPT
++ { return const_iterator(this, bm::id_max); }
++
++ /** Get const_itertor re-positioned to specific element
++ @param idx - position in the sparse vector
++ */
++ const_iterator get_const_iterator(size_type idx) const BMNOEXCEPT
++ { return const_iterator(this, idx); }
++
+ back_insert_iterator get_back_inserter() { return back_insert_iterator(this); }
+ ///@}
+
+@@ -408,13 +592,14 @@
+ \param opt_mode - requested compression depth
+ \param stat - memory allocation statistics after optimization
+ */
+- void optimize(bm::word_t* temp_block = 0,
+- typename bvector_type::optmode opt_mode = bvector_type::opt_compress,
+- statistics* stat = 0);
++ void optimize(
++ bm::word_t* temp_block = 0,
++ typename bvector_type::optmode opt_mode = bvector_type::opt_compress,
++ statistics* stat = 0);
+
+ /*! \brief resize to zero, free memory
+ */
+- void clear() BMNOEXEPT;
++ void clear() BMNOEXCEPT;
+
+ /*!
+ @brief Calculates memory statistics.
+@@ -427,7 +612,8 @@
+
+ @sa statistics
+ */
+- void calc_stat(struct rsc_sparse_vector<Val, SV>::statistics* st) const;
++ void calc_stat(
++ struct rsc_sparse_vector<Val, SV>::statistics* st) const BMNOEXCEPT;
+
+ ///@}
+
+@@ -448,6 +634,14 @@
+ void copy_range(const rsc_sparse_vector<Val, SV>& csv,
+ size_type left, size_type right);
+
++ /**
++ @brief merge two vectors (argument gets destroyed)
++ It is important that both vectors have the same NULL vectors
++ @param csv - [in,out] argumnet vector to merge
++ (works like move so arg should not be used after the merge)
++ */
++ void merge_not_null(rsc_sparse_vector<Val, SV>& csv);
++
+ ///@}
+
+ // ------------------------------------------------------------
+@@ -467,12 +661,12 @@
+ /*!
+ \brief returns true if prefix sum table is in sync with the vector
+ */
+- bool in_sync() const { return in_sync_; }
++ bool in_sync() const BMNOEXCEPT { return in_sync_; }
+
+ /*!
+ \brief Unsync the prefix sum table
+ */
+- void unsync() { in_sync_ = false; }
++ void unsync() BMNOEXCEPT { in_sync_ = false; }
+ ///@}
+
+ // ------------------------------------------------------------
+@@ -483,19 +677,23 @@
+ \brief get access to bit-plain, function checks and creates a plain
+ \return bit-vector for the bit plain
+ */
+- bvector_type_const_ptr get_plain(unsigned i) const { return sv_.get_plain(i); }
++ bvector_type_const_ptr get_plain(unsigned i) const BMNOEXCEPT
++ { return sv_.get_plain(i); }
+
+- bvector_type_ptr get_plain(unsigned i) { return sv_.get_plain(i); }
++ bvector_type_ptr get_plain(unsigned i) BMNOEXCEPT
++ { return sv_.get_plain(i); }
+
+ /*!
+ Number of effective bit-plains in the value type
+ */
+- unsigned effective_plains() const { return sv_.effective_plains(); }
++ unsigned effective_plains() const BMNOEXCEPT
++ { return sv_.effective_plains(); }
+
+ /*!
+ \brief get total number of bit-plains in the vector
+ */
+- static unsigned plains() { return sparse_vector_type::plains(); }
++ static unsigned plains() BMNOEXCEPT
++ { return sparse_vector_type::plains(); }
+
+ /** Number of stored bit-plains (value plains + extra */
+ static unsigned stored_plains()
+@@ -504,22 +702,23 @@
+ /*!
+ \brief access dense vector
+ */
+- const sparse_vector_type& get_sv() const { return sv_; }
++ const sparse_vector_type& get_sv() const BMNOEXCEPT { return sv_; }
+
+ /*!
+ \brief size of internal dense vector
+ */
+- size_type effective_size() const { return sv_.size(); }
++ size_type effective_size() const BMNOEXCEPT { return sv_.size(); }
+
+ /**
+ \brief Always 1 (non-matrix type)
+ */
+- size_type effective_vector_max() const { return 1; }
++ size_type effective_vector_max() const BMNOEXCEPT { return 1; }
+
+ /*!
+ get read-only access to inetrnal bit-matrix
+ */
+- const bmatrix_type& get_bmatrix() const { return sv_.get_bmatrix(); }
++ const bmatrix_type& get_bmatrix() const BMNOEXCEPT
++ { return sv_.get_bmatrix(); }
+
+ ///@}
+
+@@ -537,27 +736,30 @@
+
+ \return true if id is known and resolved successfully
+ */
+- bool resolve(size_type idx, size_type* idx_to) const;
++ bool resolve(size_type idx, size_type* idx_to) const BMNOEXCEPT;
+
+ bool resolve_range(size_type from, size_type to,
+- size_type* idx_from, size_type* idx_to) const;
++ size_type* idx_from, size_type* idx_to) const BMNOEXCEPT;
+
+ void resize_internal(size_type sz) { sv_.resize_internal(sz); }
+- size_type size_internal() const { return sv_.size(); }
++ size_type size_internal() const BMNOEXCEPT { return sv_.size(); }
+
+- bool is_remap() const { return false; }
+- size_t remap_size() const { return 0; }
+- const unsigned char* get_remap_buffer() const { return 0; }
+- unsigned char* init_remap_buffer() { return 0; }
+- void set_remap() { }
++ bool is_remap() const BMNOEXCEPT { return false; }
++ size_t remap_size() const BMNOEXCEPT { return 0; }
++ const unsigned char* get_remap_buffer() const BMNOEXCEPT { return 0; }
++ unsigned char* init_remap_buffer() BMNOEXCEPT { return 0; }
++ void set_remap() BMNOEXCEPT { }
+
+ void push_back_no_check(size_type idx, value_type v);
+
+
+ private:
+- void construct_bv_blocks();
+- void free_bv_blocks();
+
++ /// Allocate memory for RS index
++ void construct_rs_index();
++ /// Free rs-index
++ void free_rs_index();
++
+ protected:
+ template<class SVect> friend class sparse_vector_scanner;
+ template<class SVect> friend class sparse_vector_serializer;
+@@ -580,21 +782,45 @@
+ allocation_policy_type ap,
+ size_type bv_max_size,
+ const allocator_type& alloc)
+-: sv_(null_able, ap, bv_max_size, alloc),
+- in_sync_(false)
++: sv_(null_able, ap, bv_max_size, alloc), in_sync_(false)
+ {
+ BM_ASSERT(null_able == bm::use_null);
+ BM_ASSERT(int(sv_value_plains) == int(SV::sv_value_plains));
+ size_ = max_id_ = 0;
+- construct_bv_blocks();
++ construct_rs_index();
+ }
+
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
++rsc_sparse_vector<Val, SV>::rsc_sparse_vector(const bvector_type& bv_null)
++: sv_(bm::use_null), in_sync_(false)
++{
++ construct_rs_index();
++ bvector_type* bv = sv_.get_null_bvect();
++ BM_ASSERT(bv);
++ *bv = bv_null;
++
++ bool found = bv->find_reverse(max_id_);
++ if (found)
++ {
++ size_ = max_id_ + 1;
++ size_type sz = bv->count();
++ sv_.resize(sz);
++ }
++ else
++ {
++ BM_ASSERT(!bv->any());
++ size_ = max_id_ = 0;
++ }
++}
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
+ rsc_sparse_vector<Val, SV>::~rsc_sparse_vector()
+ {
+- free_bv_blocks();
++ free_rs_index();
+ }
+
+ //---------------------------------------------------------------------
+@@ -602,24 +828,20 @@
+ template<class Val, class SV>
+ rsc_sparse_vector<Val, SV>::rsc_sparse_vector(
+ const rsc_sparse_vector<Val, SV>& csv)
+-: sv_(csv.sv_),
+- size_(csv.size_),
+- max_id_(csv.max_id_),
+- in_sync_(csv.in_sync_)
++: sv_(csv.sv_), size_(csv.size_), max_id_(csv.max_id_), in_sync_(csv.in_sync_)
+ {
+ BM_ASSERT(int(sv_value_plains) == int(SV::sv_value_plains));
+
+- construct_bv_blocks();
++ construct_rs_index();
+ if (in_sync_)
+- {
+ bv_blocks_ptr_->copy_from(*(csv.bv_blocks_ptr_));
+- }
+ }
+
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+-rsc_sparse_vector<Val, SV>::rsc_sparse_vector(rsc_sparse_vector<Val,SV>&& csv) BMNOEXEPT
++rsc_sparse_vector<Val, SV>::rsc_sparse_vector(
++ rsc_sparse_vector<Val,SV>&& csv) BMNOEXCEPT
+ : sv_(bm::use_null),
+ size_(0),
+ max_id_(0), in_sync_(false)
+@@ -636,7 +858,7 @@
+
+ template<class Val, class SV>
+ typename rsc_sparse_vector<Val, SV>::size_type
+-rsc_sparse_vector<Val, SV>::size() const
++rsc_sparse_vector<Val, SV>::size() const BMNOEXCEPT
+ {
+ return size_;
+ }
+@@ -686,6 +908,7 @@
+ size_type sv_idx = bv_null->count_range(0, idx);
+ bv_null->clear_bit_no_check(idx);
+ sv_.erase(--sv_idx);
++ in_sync_ = false;
+ }
+ }
+
+@@ -692,19 +915,102 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
++void rsc_sparse_vector<Val, SV>::inc(size_type idx)
++{
++ bvector_type* bv_null = sv_.get_null_bvect();
++ BM_ASSERT(bv_null);
++
++ size_type sv_idx;
++ bool found = bv_null->test(idx);
++
++ sv_idx = in_sync_ ? bv_null->count_to(idx, *bv_blocks_ptr_)
++ : bv_null->count_range(0, idx); // TODO: make test'n'count
++
++ if (found)
++ {
++ sv_.inc_no_null(--sv_idx);
++ }
++ else
++ {
++ sv_.insert_value_no_null(sv_idx, 1);
++ bv_null->set_bit_no_check(idx);
++
++ if (idx > max_id_)
++ {
++ max_id_ = idx;
++ size_ = max_id_ + 1;
++ }
++ in_sync_ = false;
++ }
++}
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++void rsc_sparse_vector<Val, SV>::inc(size_type idx, value_type v)
++{
++ bvector_type* bv_null = sv_.get_null_bvect();
++ BM_ASSERT(bv_null);
++
++ size_type sv_idx;
++ bool found = bv_null->test(idx);
++
++ sv_idx = in_sync_ ? bv_null->count_to(idx, *bv_blocks_ptr_)
++ : bv_null->count_range(0, idx); // TODO: make test'n'count
++
++ if (found)
++ {
++ sv_.inc_no_null(--sv_idx, v);
++ }
++ else
++ {
++ sv_.insert_value_no_null(sv_idx, v);
++ bv_null->set_bit_no_check(idx);
++
++ if (idx > max_id_)
++ {
++ max_id_ = idx;
++ size_ = max_id_ + 1;
++ }
++ in_sync_ = false;
++ }
++}
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++void rsc_sparse_vector<Val, SV>::inc_not_null(size_type idx, value_type v)
++{
++ bvector_type* bv_null = sv_.get_null_bvect();
++ BM_ASSERT(bv_null->test(idx)); // idx must be NOT NULL
++
++ size_type sv_idx;
++ sv_idx = in_sync_ ? bv_null->count_to(idx, *bv_blocks_ptr_)
++ : bv_null->count_range(0, idx); // TODO: make test'n'count
++ --sv_idx;
++ if (v == 1)
++ sv_.inc_no_null(sv_idx);
++ else
++ sv_.inc_no_null(sv_idx, v);
++}
++
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
+ void rsc_sparse_vector<Val, SV>::set(size_type idx, value_type v)
+ {
+ bvector_type* bv_null = sv_.get_null_bvect();
+ BM_ASSERT(bv_null);
+-
++
++ size_type sv_idx;
+ bool found = bv_null->test(idx);
+- size_type sv_idx = bv_null->count_range(0, idx); // TODO: make test'n'count
+-// size_type sv_idx;
+-// bool found = resolve(idx, &sv_idx);
+
++ sv_idx = in_sync_ ? bv_null->count_to(idx, *bv_blocks_ptr_)
++ : bv_null->count_range(0, idx); // TODO: make test'n'count
++
+ if (found)
+ {
+- //sv_.set(--sv_idx, v);
+ sv_.set_value_no_null(--sv_idx, v);
+ }
+ else
+@@ -725,7 +1031,7 @@
+
+ template<class Val, class SV>
+ bool rsc_sparse_vector<Val, SV>::equal(
+- const rsc_sparse_vector<Val, SV>& csv) const
++ const rsc_sparse_vector<Val, SV>& csv) const BMNOEXCEPT
+ {
+ if (this == &csv)
+ return true;
+@@ -739,7 +1045,7 @@
+
+ template<class Val, class SV>
+ void rsc_sparse_vector<Val, SV>::load_from(
+- const sparse_vector_type& sv_src)
++ const sparse_vector_type& sv_src)
+ {
+ max_id_ = size_ = 0;
+
+@@ -837,10 +1143,10 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+-bool rsc_sparse_vector<Val, SV>::resolve(size_type idx, size_type* idx_to) const
++bool rsc_sparse_vector<Val, SV>::resolve(size_type idx,
++ size_type* idx_to) const BMNOEXCEPT
+ {
+ BM_ASSERT(idx_to);
+-
+ const bvector_type* bv_null = sv_.get_null_bvector();
+ if (in_sync_)
+ {
+@@ -849,23 +1155,17 @@
+ else // slow access
+ {
+ bool found = bv_null->test(idx);
+- if (!found)
+- {
+- *idx_to = 0;
+- }
+- else
+- {
+- *idx_to = bv_null->count_range(0, idx);
+- }
++ *idx_to = found ? bv_null->count_range(0, idx) : 0;
+ }
+ return bool(*idx_to);
+ }
++
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+ bool rsc_sparse_vector<Val, SV>::resolve_range(
+ size_type from, size_type to,
+- size_type* idx_from, size_type* idx_to) const
++ size_type* idx_from, size_type* idx_to) const BMNOEXCEPT
+ {
+ BM_ASSERT(idx_to && idx_from);
+ const bvector_type* bv_null = sv_.get_null_bvector();
+@@ -876,12 +1176,15 @@
+ copy_sz = bv_null->count_range(from, to);
+ if (!copy_sz)
+ return false;
++
+ if (in_sync_)
+- sv_left = bv_null->count_range(0, from, *bv_blocks_ptr_);
++ sv_left = bv_null->rank_corrected(from, *bv_blocks_ptr_);
+ else
++ {
+ sv_left = bv_null->count_range(0, from);
+- bool tl = bv_null->test(from); // TODO: add count and test
+- sv_left -= tl; // rank correction
++ bool tl = bv_null->test(from); // TODO: add count and test
++ sv_left -= tl; // rank correction
++ }
+
+ *idx_from = sv_left; *idx_to = sv_left + copy_sz - 1;
+ return true;
+@@ -910,7 +1213,7 @@
+
+ template<class Val, class SV>
+ typename rsc_sparse_vector<Val, SV>::value_type
+-rsc_sparse_vector<Val, SV>::get(size_type idx) const
++rsc_sparse_vector<Val, SV>::get(size_type idx) const BMNOEXCEPT
+ {
+ size_type sv_idx;
+ bool found = resolve(idx, &sv_idx);
+@@ -923,7 +1226,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+-bool rsc_sparse_vector<Val, SV>::is_null(size_type idx) const
++bool rsc_sparse_vector<Val, SV>::is_null(size_type idx) const BMNOEXCEPT
+ {
+ const bvector_type* bv_null = sv_.get_null_bvector();
+ BM_ASSERT(bv_null);
+@@ -950,7 +1253,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+-void rsc_sparse_vector<Val, SV>::clear() BMNOEXEPT
++void rsc_sparse_vector<Val, SV>::clear() BMNOEXCEPT
+ {
+ sv_.clear();
+ in_sync_ = false; max_id_ = size_ = 0;
+@@ -960,7 +1263,7 @@
+
+ template<class Val, class SV>
+ void rsc_sparse_vector<Val, SV>::calc_stat(
+- struct rsc_sparse_vector<Val, SV>::statistics* st) const
++ struct rsc_sparse_vector<Val, SV>::statistics* st) const BMNOEXCEPT
+ {
+ BM_ASSERT(st);
+ sv_.calc_stat((typename sparse_vector_type::statistics*)st);
+@@ -977,7 +1280,7 @@
+
+ template<class Val, class SV>
+ const typename rsc_sparse_vector<Val, SV>::bvector_type*
+-rsc_sparse_vector<Val, SV>::get_null_bvector() const
++rsc_sparse_vector<Val, SV>::get_null_bvector() const BMNOEXCEPT
+ {
+ return sv_.get_null_bvector();
+ }
+@@ -986,7 +1289,8 @@
+
+ template<class Val, class SV>
+ bool
+-rsc_sparse_vector<Val, SV>::find_rank(size_type rank, size_type& idx) const
++rsc_sparse_vector<Val, SV>::find_rank(size_type rank,
++ size_type& idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(rank);
+ bool b;
+@@ -1006,7 +1310,7 @@
+ rsc_sparse_vector<Val, SV>::decode(value_type* arr,
+ size_type idx_from,
+ size_type size,
+- bool /*zero_mem*/) const
++ bool zero_mem) const
+ {
+ if (size == 0)
+ return 0;
+@@ -1020,51 +1324,104 @@
+
+ if ((bm::id_max - size) <= idx_from)
+ size = bm::id_max - idx_from;
++ if ((idx_from + size) > this->size())
++ size = this->size() - idx_from;
+
+ const bvector_type* bv_null = sv_.get_null_bvector();
++ size_type rank = bv_null->rank_corrected(idx_from, *bv_blocks_ptr_);
+
+- size_type rank = bv_null->count_to(idx_from, *bv_blocks_ptr_);
+- bool b = bv_null->test(idx_from);
+-
++ BM_ASSERT(rank == bv_null->count_range(0, idx_from) - bv_null->test(idx_from));
++
+ bvector_enumerator_type en_i = bv_null->get_enumerator(idx_from);
+- size_type i = *en_i;
+- if (idx_from + size <= i) // empty space (all zeros)
++ BM_ASSERT(en_i.valid());
++
++ if (zero_mem)
++ ::memset(arr, 0, sizeof(value_type)*size);
++
++ sparse_vector_const_iterator it = sv_.get_const_iterator(rank);
++ size_type i = 0;
++ if (it.valid())
+ {
++ do
++ {
++ size_type en_idx = *en_i;
++ size_type delta = en_idx - idx_from;
++ idx_from += delta;
++ i += delta;
++ if (i >= size)
++ return size;
++ arr[i++] = it.value();
++ if (!en_i.advance())
++ break;
++ if (!it.advance())
++ break;
++ ++idx_from;
++ } while (i < size);
++ }
++ return i;
++}
++
++
++template<class Val, class SV>
++typename rsc_sparse_vector<Val, SV>::size_type
++rsc_sparse_vector<Val, SV>::decode_buf(value_type* arr,
++ value_type* arr_buf_tmp,
++ size_type idx_from,
++ size_type size,
++ bool zero_mem) const BMNOEXCEPT
++{
++ if (!size || (idx_from >= this->size()))
++ return 0;
++
++ BM_ASSERT(arr && arr_buf_tmp);
++ BM_ASSERT(arr != arr_buf_tmp);
++ BM_ASSERT(in_sync_); // call sync() before decoding
++ BM_ASSERT(bv_blocks_ptr_);
++
++ if ((bm::id_max - size) <= idx_from)
++ size = bm::id_max - idx_from;
++ if ((idx_from + size) > this->size())
++ size = this->size() - idx_from;
++
++ if (zero_mem)
+ ::memset(arr, 0, sizeof(value_type)*size);
++
++ const bvector_type* bv_null = sv_.get_null_bvector();
++ size_type rank = bv_null->rank_corrected(idx_from, *bv_blocks_ptr_);
++
++ BM_ASSERT(rank == bv_null->count_range(0, idx_from) - bv_null->test(idx_from));
++
++ bvector_enumerator_type en_i = bv_null->get_enumerator(idx_from);
++ if (!en_i.valid())
+ return size;
+- }
+- rank -= b;
+- sparse_vector_const_iterator it = sv_.get_const_iterator(rank);
+- i = 0;
+- while (it.valid())
++
++ size_type i = en_i.value();
++ if (idx_from + size <= i) // empty space (all zeros)
++ return size;
++
++ size_type extract_cnt =
++ bv_null->count_range(idx_from, idx_from + size - 1, *bv_blocks_ptr_);
++
++ BM_ASSERT(extract_cnt <= this->size());
++ auto ex_sz = sv_.decode(arr_buf_tmp, rank, extract_cnt, true);
++ BM_ASSERT(ex_sz == extract_cnt); (void) ex_sz;
++
++ for (i = 0; i < extract_cnt; ++i)
+ {
+- if (!en_i.valid())
+- break;
++ BM_ASSERT(en_i.valid());
+ size_type en_idx = *en_i;
+- while (idx_from < en_idx) // zero the empty prefix
+- {
+- arr[i] ^= arr[i];
+- ++i; ++idx_from;
+- if (i == size)
+- return i;
+- }
+- BM_ASSERT(idx_from == en_idx);
+- arr[i] = *it;
+- ++i; ++idx_from;
+- if (i == size)
+- return i;
+-
++ arr[en_idx-idx_from] = arr_buf_tmp[i];
+ en_i.advance();
+- it.advance();
+- } // while
+-
+- return i;
++ } // for i
++
++ return size;
+ }
+
++
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+-void rsc_sparse_vector<Val, SV>::construct_bv_blocks()
++void rsc_sparse_vector<Val, SV>::construct_rs_index()
+ {
+ if (bv_blocks_ptr_)
+ return;
+@@ -1076,7 +1433,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+-void rsc_sparse_vector<Val, SV>::free_bv_blocks()
++void rsc_sparse_vector<Val, SV>::free_rs_index()
+ {
+ if (bv_blocks_ptr_)
+ {
+@@ -1086,12 +1443,56 @@
+ }
+
+ //---------------------------------------------------------------------
++
++template<class Val, class SV>
++void rsc_sparse_vector<Val, SV>::copy_range(
++ const rsc_sparse_vector<Val, SV>& csv,
++ size_type left, size_type right)
++{
++ if (left > right)
++ bm::xor_swap(left, right);
++
++ if (left >= csv.size())
++ return;
++
++ size_ = csv.size_; max_id_ = csv.max_id_;
++ in_sync_ = false;
++
++ const bvector_type* arg_bv_null = csv.sv_.get_null_bvector();
++ size_type sv_left, sv_right;
++ bool range_valid = csv.resolve_range(left, right, &sv_left, &sv_right);
++ if (!range_valid)
++ {
++ sv_.clear(); sv_.resize(size_);
++ bvector_type* bv_null = sv_.get_null_bvect();
++ bv_null->copy_range(*arg_bv_null, 0, right);
++ return;
++ }
++ bvector_type* bv_null = sv_.get_null_bvect();
++ bv_null->copy_range(*arg_bv_null, 0, right); // not NULL vector gets a full copy
++ sv_.copy_range(csv.sv_, sv_left, sv_right, bm::no_null); // don't copy NULL
++}
++
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++void rsc_sparse_vector<Val, SV>::merge_not_null(rsc_sparse_vector<Val, SV>& csv)
++{
++ // MUST have the same NULL to work
++ BM_ASSERT(sv_.get_null_bvector()->equal(*csv.sv_.get_null_bvector()));
++
++ sv_.merge(csv.sv_);
++}
++
++
++//---------------------------------------------------------------------
+ //
+ //---------------------------------------------------------------------
+
+
+ template<class Val, class SV>
+-rsc_sparse_vector<Val, SV>::back_insert_iterator::back_insert_iterator()
++rsc_sparse_vector<Val, SV>::back_insert_iterator::back_insert_iterator() BMNOEXCEPT
+ : csv_(0)
+ {}
+
+@@ -1100,7 +1501,7 @@
+
+ template<class Val, class SV>
+ rsc_sparse_vector<Val, SV>::back_insert_iterator::back_insert_iterator
+- (rsc_sparse_vector_type* csv)
++ (rsc_sparse_vector_type* csv) BMNOEXCEPT
+ {
+ csv_ = csv;
+ sv_bi_ = csv->sv_.get_back_inserter();
+@@ -1134,7 +1535,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class SV>
+-void rsc_sparse_vector<Val, SV>::back_insert_iterator::add_null()
++void rsc_sparse_vector<Val, SV>::back_insert_iterator::add_null() BMNOEXCEPT
+ {
+ BM_ASSERT(csv_);
+ csv_->max_id_++;
+@@ -1145,7 +1546,7 @@
+
+ template<class Val, class SV>
+ void rsc_sparse_vector<Val, SV>::back_insert_iterator::add_null(
+- rsc_sparse_vector<Val, SV>::back_insert_iterator::size_type count)
++ rsc_sparse_vector<Val, SV>::back_insert_iterator::size_type count) BMNOEXCEPT
+ {
+ BM_ASSERT(csv_);
+ csv_->max_id_+=count;
+@@ -1162,39 +1563,140 @@
+ }
+
+ //---------------------------------------------------------------------
++//
++//---------------------------------------------------------------------
+
++template<class Val, class BV>
++rsc_sparse_vector<Val, BV>::const_iterator::const_iterator() BMNOEXCEPT
++: csv_(0), pos_(bm::id_max), buf_ptr_(0)
++{}
++
++//---------------------------------------------------------------------
++
+ template<class Val, class SV>
+-void rsc_sparse_vector<Val, SV>::copy_range(
+- const rsc_sparse_vector<Val, SV>& csv,
+- size_type left, size_type right)
++rsc_sparse_vector<Val, SV>::const_iterator::const_iterator(
++ const typename rsc_sparse_vector<Val, SV>::const_iterator& it) BMNOEXCEPT
++: csv_(it.csv_), pos_(it.pos_), buf_ptr_(0)
++{}
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++rsc_sparse_vector<Val, SV>::const_iterator::const_iterator(
++ const typename rsc_sparse_vector<Val, SV>::const_iterator::rsc_sparse_vector_type* csv
++ ) BMNOEXCEPT
++: csv_(csv), buf_ptr_(0)
+ {
+- if (left > right)
+- bm::xor_swap(left, right);
++ BM_ASSERT(csv_);
++ pos_ = csv_->empty() ? bm::id_max : 0u;
++}
+
+- if (left >= csv.size())
+- return;
+-
+- size_ = csv.size_; max_id_ = csv.max_id_;
+- in_sync_ = false;
++//---------------------------------------------------------------------
+
+- const bvector_type* arg_bv_null = csv.sv_.get_null_bvector();
+- size_type sv_left, sv_right;
+- bool range_valid = csv.resolve_range(left, right, &sv_left, &sv_right);
++template<class Val, class SV>
++rsc_sparse_vector<Val, SV>::const_iterator::const_iterator(
++ const typename rsc_sparse_vector<Val, SV>::const_iterator::rsc_sparse_vector_type* csv,
++ typename rsc_sparse_vector<Val, SV>::size_type pos) BMNOEXCEPT
++: csv_(csv), buf_ptr_(0)
++{
++ BM_ASSERT(csv_);
++ this->go_to(pos);
++}
+
+- if (!range_valid)
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++void rsc_sparse_vector<Val, SV>::const_iterator::go_to(size_type pos) BMNOEXCEPT
++{
++ pos_ = (!csv_ || pos >= csv_->size()) ? bm::id_max : pos;
++ buf_ptr_ = 0;
++}
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++bool rsc_sparse_vector<Val, SV>::const_iterator::advance() BMNOEXCEPT
++{
++ if (pos_ == bm::id_max) // nothing to do, we are at the end
++ return false;
++ ++pos_;
++ if (pos_ >= csv_->size())
+ {
+- sv_.clear();
+- sv_.resize(size_);
+- bvector_type* bv_null = sv_.get_null_bvect();
+- bv_null->copy_range(*arg_bv_null, 0, right);
+- return;
++ this->invalidate();
++ return false;
+ }
+- bvector_type* bv_null = sv_.get_null_bvect();
+- bv_null->copy_range(*arg_bv_null, 0, right); // not NULL vector gets a full copy
+- sv_.copy_range(csv.sv_, sv_left, sv_right, bm::no_null); // don't copy NULL
++ if (buf_ptr_)
++ {
++ ++buf_ptr_;
++ if (buf_ptr_ - ((value_type*)vbuffer_.data()) >= n_buf_size)
++ buf_ptr_ = 0;
++ }
++ return true;
+ }
+
++//---------------------------------------------------------------------
+
++template<class Val, class SV>
++typename rsc_sparse_vector<Val, SV>::const_iterator::value_type
++rsc_sparse_vector<Val, SV>::const_iterator::value() const
++{
++ BM_ASSERT(this->valid());
++ value_type v;
++
++ if (!buf_ptr_)
++ {
++ vbuffer_.reserve(n_buf_size * sizeof(value_type));
++ tbuffer_.reserve(n_buf_size * sizeof(value_type));
++ buf_ptr_ = (value_type*)(vbuffer_.data());
++ value_type* tmp_buf_ptr = (value_type*) (tbuffer_.data());
++
++ csv_->decode_buf(buf_ptr_, tmp_buf_ptr, pos_, n_buf_size, true);
++ }
++ v = *buf_ptr_;
++ return v;
++}
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++void rsc_sparse_vector<Val, SV>::const_iterator::skip_zero_values() BMNOEXCEPT
++{
++ value_type v = value();
++ if (buf_ptr_)
++ {
++ v = *buf_ptr_;
++ value_type* buf_end = ((value_type*)vbuffer_.data()) + n_buf_size;
++ while(!v)
++ {
++ ++pos_;
++ if (++buf_ptr_ < buf_end)
++ v = *buf_ptr_;
++ else
++ break;
++ }
++ if (pos_ >= csv_->size())
++ {
++ pos_ = bm::id_max;
++ return;
++ }
++ if (buf_ptr_ >= buf_end)
++ buf_ptr_ = 0;
++ }
++}
++
++//---------------------------------------------------------------------
++
++template<class Val, class SV>
++bool rsc_sparse_vector<Val, SV>::const_iterator::is_null() const BMNOEXCEPT
++{
++ return csv_->is_null(pos_);
++}
++
++
++//---------------------------------------------------------------------
++
++
++
+ } // namespace bm
+
+ #include "bmundef.h"
+Index: c++/include/util/bitset/bmsparsevec.h
+===================================================================
+--- a/c++/include/util/bitset/bmsparsevec.h (revision 90103)
++++ b/c++/include/util/bitset/bmsparsevec.h (revision 90104)
+@@ -37,7 +37,7 @@
+
+
+ #include "bmtrans.h"
+-#include "bmalgo.h"
++#include "bmalgo_impl.h"
+ #include "bmbuffer.h"
+ #include "bmbmatrix.h"
+ #include "bmdef.h"
+@@ -113,10 +113,10 @@
+ class reference
+ {
+ public:
+- reference(sparse_vector<Val, BV>& sv, size_type idx) BMNOEXEPT
++ reference(sparse_vector<Val, BV>& sv, size_type idx) BMNOEXCEPT
+ : sv_(sv), idx_(idx)
+ {}
+- operator value_type() const { return sv_.get(idx_); }
++ operator value_type() const BMNOEXCEPT { return sv_.get(idx_); }
+ reference& operator=(const reference& ref)
+ {
+ sv_.set(idx_, (value_type)ref);
+@@ -127,9 +127,9 @@
+ sv_.set(idx_, val);
+ return *this;
+ }
+- bool operator==(const reference& ref) const
++ bool operator==(const reference& ref) const BMNOEXCEPT
+ { return bool(*this) == bool(ref); }
+- bool is_null() const { return sv_.is_null(idx_); }
++ bool is_null() const BMNOEXCEPT { return sv_.is_null(idx_); }
+ private:
+ sparse_vector<Val, BV>& sv_;
+ size_type idx_;
+@@ -169,30 +169,30 @@
+ typedef value_type& reference;
+
+ public:
+- const_iterator();
+- const_iterator(const sparse_vector_type* sv);
+- const_iterator(const sparse_vector_type* sv, size_type pos);
+- const_iterator(const const_iterator& it);
++ const_iterator() BMNOEXCEPT;
++ const_iterator(const sparse_vector_type* sv) BMNOEXCEPT;
++ const_iterator(const sparse_vector_type* sv, size_type pos) BMNOEXCEPT;
++ const_iterator(const const_iterator& it) BMNOEXCEPT;
+
+- bool operator==(const const_iterator& it) const
++ bool operator==(const const_iterator& it) const BMNOEXCEPT
+ { return (pos_ == it.pos_) && (sv_ == it.sv_); }
+- bool operator!=(const const_iterator& it) const
++ bool operator!=(const const_iterator& it) const BMNOEXCEPT
+ { return ! operator==(it); }
+- bool operator < (const const_iterator& it) const
++ bool operator < (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ < it.pos_; }
+- bool operator <= (const const_iterator& it) const
++ bool operator <= (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ <= it.pos_; }
+- bool operator > (const const_iterator& it) const
++ bool operator > (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ > it.pos_; }
+- bool operator >= (const const_iterator& it) const
++ bool operator >= (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ >= it.pos_; }
+
+ /// \brief Get current position (value)
+- value_type operator*() const { return this->value(); }
++ value_type operator*() const { return this->value(); }
+
+
+ /// \brief Advance to the next available value
+- const_iterator& operator++() { this->advance(); return *this; }
++ const_iterator& operator++() BMNOEXCEPT { this->advance(); return *this; }
+
+ /// \brief Advance to the next available value
+ const_iterator& operator++(int)
+@@ -203,24 +203,25 @@
+ value_type value() const;
+
+ /// \brief Get NULL status
+- bool is_null() const;
++ bool is_null() const BMNOEXCEPT;
+
+ /// Returns true if iterator is at a valid position
+- bool valid() const { return pos_ != bm::id_max; }
++ bool valid() const BMNOEXCEPT { return pos_ != bm::id_max; }
+
+ /// Invalidate current iterator
+- void invalidate() { pos_ = bm::id_max; }
++ void invalidate() BMNOEXCEPT { pos_ = bm::id_max; }
+
+ /// Current position (index) in the vector
+- size_type pos() const { return pos_; }
++ size_type pos() const BMNOEXCEPT{ return pos_; }
+
+ /// re-position to a specified position
+- void go_to(size_type pos);
++ void go_to(size_type pos) BMNOEXCEPT;
+
+ /// advance iterator forward by one
+- void advance();
++ /// @return true if it is still valid
++ bool advance() BMNOEXCEPT;
+
+- void skip_zero_values();
++ void skip_zero_values() BMNOEXCEPT;
+ private:
+ enum buf_size_e
+ {
+@@ -232,7 +233,6 @@
+ size_type pos_; ///!< Position
+ mutable buffer_type buffer_; ///!< value buffer
+ mutable value_type* buf_ptr_; ///!< position in the buffer
+- mutable allocator_pool_type pool_;
+ };
+
+ /**
+@@ -313,7 +313,7 @@
+ Get access to not-null vector
+ @internal
+ */
+- bvector_type* get_null_bvect() const { return bv_null_; }
++ bvector_type* get_null_bvect() const BMNOEXCEPT { return bv_null_; }
+
+ /** add value to the buffer without changing the NULL vector
+ @param v - value to push back
+@@ -323,9 +323,9 @@
+ size_type add_value_no_null(value_type v);
+
+ /**
+- Reconf back inserter not to touch the NULL vector
++ Reconfigure back inserter not to touch the NULL vector
+ */
+- void disable_set_null() { set_not_null_ = false; }
++ void disable_set_null() BMNOEXCEPT { set_not_null_ = false; }
+ // ---------------------------------------------------------------
+
+ protected:
+@@ -387,11 +387,11 @@
+
+ #ifndef BM_NO_CXX11
+ /*! move-ctor */
+- sparse_vector(sparse_vector<Val, BV>&& sv) BMNOEXEPT;
++ sparse_vector(sparse_vector<Val, BV>&& sv) BMNOEXCEPT;
+
+
+ /*! move assignmment operator */
+- sparse_vector<Val,BV>& operator = (sparse_vector<Val, BV>&& sv) BMNOEXEPT
++ sparse_vector<Val,BV>& operator = (sparse_vector<Val, BV>&& sv) BMNOEXCEPT
+ {
+ if (this != &sv)
+ {
+@@ -402,7 +402,7 @@
+ }
+ #endif
+
+- ~sparse_vector() BMNOEXEPT;
++ ~sparse_vector() BMNOEXCEPT;
+ ///@}
+
+
+@@ -411,7 +411,8 @@
+ ///@{
+
+ /** \brief Operator to get write access to an element */
+- reference operator[](size_type idx) { return reference(*this, idx); }
++ reference operator[](size_type idx) BMNOEXCEPT
++ { return reference(*this, idx); }
+
+ /*!
+ \brief get specified element without bounds checking
+@@ -418,7 +419,8 @@
+ \param idx - element index
+ \return value of the element
+ */
+- value_type operator[](size_type idx) const { return this->get(idx); }
++ value_type operator[](size_type idx) const BMNOEXCEPT
++ { return this->get(idx); }
+
+ /*!
+ \brief access specified element with bounds checking
+@@ -431,7 +433,7 @@
+ \param idx - element index
+ \return value of the element
+ */
+- value_type get(size_type idx) const;
++ value_type get(size_type idx) const BMNOEXCEPT;
+
+ /*!
+ \brief set specified element with bounds checking and automatic resize
+@@ -485,21 +487,24 @@
+ //@{
+
+ /** Provide const iterator access to container content */
+- const_iterator begin() const;
++ const_iterator begin() const BMNOEXCEPT;
+
+ /** Provide const iterator access to the end */
+- const_iterator end() const { return const_iterator(this, bm::id_max); }
++ const_iterator end() const BMNOEXCEPT
++ { return const_iterator(this, bm::id_max); }
+
+ /** Get const_itertor re-positioned to specific element
+ @param idx - position in the sparse vector
+ */
+- const_iterator get_const_iterator(size_type idx) const { return const_iterator(this, idx); }
++ const_iterator get_const_iterator(size_type idx) const BMNOEXCEPT
++ { return const_iterator(this, idx); }
+
+ /** Provide back insert iterator
+ Back insert iterator implements buffered insertion,
+ which is faster, than random access or push_back
+ */
+- back_insert_iterator get_back_inserter() { return back_insert_iterator(this); }
++ back_insert_iterator get_back_inserter()
++ { return back_insert_iterator(this); }
+ ///@}
+
+
+@@ -515,7 +520,7 @@
+ /** \brief trait if sparse vector is "compressed" (false)
+ */
+ static
+- bool is_compressed() { return false; }
++ bool is_compressed() BMNOEXCEPT { return false; }
+
+ ///@}
+
+@@ -608,7 +613,7 @@
+
+ /*! \brief content exchange
+ */
+- void swap(sparse_vector<Val, BV>& sv) BMNOEXEPT;
++ void swap(sparse_vector<Val, BV>& sv) BMNOEXCEPT;
+
+ // ------------------------------------------------------------
+ /*! @name Clear */
+@@ -615,7 +620,7 @@
+ ///@{
+
+ /*! \brief resize to zero, free memory */
+- void clear() BMNOEXEPT;
++ void clear() BMNOEXCEPT;
+
+ /*!
+ \brief clear range (assign bit 0 for all plains)
+@@ -636,12 +641,12 @@
+ /*! \brief return size of the vector
+ \return size of sparse vector
+ */
+- size_type size() const { return this->size_; }
++ size_type size() const BMNOEXCEPT { return this->size_; }
+
+ /*! \brief return true if vector is empty
+ \return true if empty
+ */
+- bool empty() const { return (size() == 0); }
++ bool empty() const BMNOEXCEPT { return (size() == 0); }
+
+ /*! \brief resize vector
+ \param sz - new size
+@@ -663,7 +668,7 @@
+ \return true, if it is the same
+ */
+ bool equal(const sparse_vector<Val, BV>& sv,
+- bm::null_support null_able = bm::use_null) const;
++ bm::null_support null_able = bm::use_null) const BMNOEXCEPT;
+
+ ///@}
+
+@@ -679,7 +684,7 @@
+
+ \return 0 - equal, < 0 - vect[i] < str, >0 otherwise
+ */
+- int compare(size_type idx, const value_type val) const;
++ int compare(size_type idx, const value_type val) const BMNOEXCEPT;
+
+ ///@}
+
+@@ -694,8 +699,9 @@
+ \param stat - memory allocation statistics after optimization
+ */
+ void optimize(bm::word_t* temp_block = 0,
+- typename bvector_type::optmode opt_mode = bvector_type::opt_compress,
+- typename sparse_vector<Val, BV>::statistics* stat = 0);
++ typename bvector_type::optmode opt_mode = bvector_type::opt_compress,
++ typename sparse_vector<Val, BV>::statistics* stat = 0);
++
+ /*!
+ \brief Optimize sizes of GAP blocks
+
+@@ -715,7 +721,8 @@
+
+ @sa statistics
+ */
+- void calc_stat(struct sparse_vector<Val, BV>::statistics* st) const;
++ void calc_stat(
++ struct sparse_vector<Val, BV>::statistics* st) const BMNOEXCEPT;
+ ///@}
+
+ // ------------------------------------------------------------
+@@ -789,7 +796,6 @@
+ \param offset - target index in the sparse vector to export from
+ \param zero_mem - set to false if target array is pre-initialized
+ with 0s to avoid performance penalty
+- \param pool_ptr - optional pointer to block allocation pool
+ \return number of exported elements
+
+ \sa decode
+@@ -799,8 +805,7 @@
+ size_type extract(value_type* arr,
+ size_type size,
+ size_type offset = 0,
+- bool zero_mem = true,
+- allocator_pool_type* pool_ptr = 0) const;
++ bool zero_mem = true) const BMNOEXCEPT2;
+
+ /** \brief extract small window without use of masking vector
+ \sa decode
+@@ -824,7 +829,7 @@
+ \internal
+ */
+ static
+- size_type translate_address(size_type i) { return i; }
++ size_type translate_address(size_type i) BMNOEXCEPT { return i; }
+
+ /**
+ \brief throw range error
+@@ -845,17 +850,17 @@
+ \brief find position of compressed element by its rank
+ */
+ static
+- bool find_rank(size_type rank, size_type& pos);
++ bool find_rank(size_type rank, size_type& pos) BMNOEXCEPT;
+
+ /**
+ \brief size of sparse vector (may be different for RSC)
+ */
+- size_type effective_size() const { return size(); }
++ size_type effective_size() const BMNOEXCEPT { return size(); }
+
+ /**
+ \brief Always 1 (non-matrix type)
+ */
+- size_type effective_vector_max() const { return 1; }
++ size_type effective_vector_max() const BMNOEXCEPT { return 1; }
+
+ ///@}
+
+@@ -862,7 +867,7 @@
+ /// Set allocator pool for local (non-threaded)
+ /// memory cyclic(lots of alloc-free ops) opertations
+ ///
+- void set_allocator_pool(allocator_pool_type* pool_ptr);
++ void set_allocator_pool(allocator_pool_type* pool_ptr) BMNOEXCEPT;
+
+ protected:
+ enum octet_plains
+@@ -886,20 +891,26 @@
+ void insert_value_no_null(size_type idx, value_type v);
+
+ void resize_internal(size_type sz) { resize(sz); }
+- size_type size_internal() const { return size(); }
++ size_type size_internal() const BMNOEXCEPT { return size(); }
+
+- bool is_remap() const { return false; }
+- size_t remap_size() const { return 0; }
+- const unsigned char* get_remap_buffer() const { return 0; }
+- unsigned char* init_remap_buffer() { return 0; }
+- void set_remap() { }
++ bool is_remap() const BMNOEXCEPT { return false; }
++ size_t remap_size() const BMNOEXCEPT { return 0; }
++ const unsigned char* get_remap_buffer() const BMNOEXCEPT { return 0; }
++ unsigned char* init_remap_buffer() BMNOEXCEPT { return 0; }
++ void set_remap() BMNOEXCEPT { }
+
+ bool resolve_range(size_type from, size_type to,
+- size_type* idx_from, size_type* idx_to) const
++ size_type* idx_from, size_type* idx_to) const BMNOEXCEPT
+ {
+ *idx_from = from; *idx_to = to; return true;
+ }
+
++ /// Increment element by 1 without changing NULL vector or size
++ void inc_no_null(size_type idx);
++
++ /// increment by v without changing NULL vector or size
++ void inc_no_null(size_type idx, value_type v);
++
+ protected:
+ template<class V, class SV> friend class rsc_sparse_vector;
+ template<class SVect> friend class sparse_vector_scanner;
+@@ -933,7 +944,7 @@
+ #ifndef BM_NO_CXX11
+
+ template<class Val, class BV>
+-sparse_vector<Val, BV>::sparse_vector(sparse_vector<Val, BV>&& sv) BMNOEXEPT
++sparse_vector<Val, BV>::sparse_vector(sparse_vector<Val, BV>&& sv) BMNOEXCEPT
+ {
+ parent_type::swap(sv);
+ }
+@@ -944,13 +955,13 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-sparse_vector<Val, BV>::~sparse_vector() BMNOEXEPT
++sparse_vector<Val, BV>::~sparse_vector() BMNOEXCEPT
+ {}
+
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-void sparse_vector<Val, BV>::swap(sparse_vector<Val, BV>& sv) BMNOEXEPT
++void sparse_vector<Val, BV>::swap(sparse_vector<Val, BV>& sv) BMNOEXCEPT
+ {
+ parent_type::swap(sv);
+ }
+@@ -1080,19 +1091,7 @@
+ size_type dec_size,
+ bool zero_mem) const
+ {
+- if (dec_size < 32)
+- {
+- return extract_range(arr, dec_size, idx_from, zero_mem);
+- }
+- return extract_plains(arr, dec_size, idx_from, zero_mem);
+- // TODO: write proper extract() based on for_each_range() and a visitor
+- /*
+- if (dec_size < 1024)
+- {
+- return extract_plains(arr, dec_size, idx_from, zero_mem);
+- }
+ return extract(arr, dec_size, idx_from, zero_mem);
+- */
+ }
+
+ //---------------------------------------------------------------------
+@@ -1373,11 +1372,10 @@
+
+ template<class Val, class BV>
+ typename sparse_vector<Val, BV>::size_type
+-sparse_vector<Val, BV>::extract(value_type* arr,
++sparse_vector<Val, BV>::extract(value_type* BMRESTRICT arr,
+ size_type size,
+ size_type offset,
+- bool zero_mem,
+- allocator_pool_type* pool_ptr) const
++ bool zero_mem) const BMNOEXCEPT2
+ {
+ /// Decoder functor
+ /// @internal
+@@ -1384,78 +1382,55 @@
+ ///
+ struct sv_decode_visitor_func
+ {
+- sv_decode_visitor_func(value_type* varr,
++ sv_decode_visitor_func(value_type* BMRESTRICT varr,
+ value_type mask,
+- size_type off)
+- : arr_(varr), mask_(mask), off_(off)
++ size_type off) BMNOEXCEPT2
++ : arr_(varr), mask_(mask), sv_off_(off)
+ {}
+-
+- void add_bits(size_type arr_offset, const unsigned char* bits, unsigned bits_size)
++
++ void add_bits(size_type bv_offset,
++ const unsigned char* bits, unsigned bits_size) BMNOEXCEPT
+ {
+- size_type idx_base = arr_offset - off_;
+- const value_type m = mask_;
+- unsigned i = 0;
+- for (; i < bits_size; ++i)
+- arr_[idx_base + bits[i]] |= m;
++ // can be negative (-1) when bv base offset = 0 and sv = 1,2..
++ size_type base = bv_offset - sv_off_;
++ value_type m = mask_;
++ for (unsigned i = 0; i < bits_size; ++i)
++ arr_[bits[i] + base] |= m;
+ }
+-
+- void add_range(size_type arr_offset, unsigned sz)
++ void add_range(size_type bv_offset, size_type sz) BMNOEXCEPT
+ {
+- size_type idx_base = arr_offset - off_;
+- const value_type m = mask_;
+- for (unsigned i = 0; i < sz; ++i)
+- arr_[i + idx_base] |= m;
++ auto base = bv_offset - sv_off_;
++ value_type m = mask_;
++ for (size_type i = 0; i < sz; ++i)
++ arr_[i + base] |= m;
+ }
+- value_type* arr_;
+- value_type mask_;
+- size_type off_;
++
++ value_type* BMRESTRICT arr_; ///< target array for reverse transpose
++ value_type mask_; ///< bit-plane mask
++ size_type sv_off_; ///< SV read offset
+ };
+
+-
+- if (size == 0)
++ if (!size)
+ return 0;
+
+ if (zero_mem)
+ ::memset(arr, 0, sizeof(value_type)*size);
+
+- size_type start = offset;
+- size_type end = start + size;
++ size_type end = offset + size;
+ if (end > this->size_)
+- {
+ end = this->size_;
+- }
+-
+- bool masked_scan = !(offset == 0 && size == this->size());
+- if (masked_scan) // use temp vector to decompress the area
++
++ sv_decode_visitor_func func(arr, 0, offset);
++
++ for (size_type i = 0; i < parent_type::value_bits(); ++i)
+ {
+- bvector_type bv_mask;
+- bv_mask.set_allocator_pool(pool_ptr);
+-
+- for (size_type i = 0; i < parent_type::value_bits(); ++i)
+- {
+- const bvector_type* bv = this->bmatr_.get_row(i);
+- if (bv)
+- {
+- bv_mask.copy_range(*bv, offset, end - 1);
+- sv_decode_visitor_func func(arr, (value_type(1) << i), offset);
+- bm::for_each_bit(bv_mask, func);
+- }
+- } // for i
+- }
+- else
+- {
+- for (size_type i = 0; i < parent_type::value_bits(); ++i)
+- {
+- const bvector_type* bv = this->bmatr_.get_row(i);
+- if (bv)
+- {
+- sv_decode_visitor_func func(arr, (value_type(1) << i), 0);
+- bm::for_each_bit(*bv, func);
+- }
+- } // for i
+- }
+-
+- return end - start;
++ const bvector_type* bv = this->bmatr_.get_row(i);
++ if (!bv)
++ continue;
++ func.mask_ = (value_type(1) << i); // set target plane OR mask
++ bm::for_each_bit_range_no_check(*bv, offset, end-1, func);
++ } // for i
++ return end - offset;
+ }
+
+ //---------------------------------------------------------------------
+@@ -1473,7 +1448,8 @@
+
+ template<class Val, class BV>
+ typename sparse_vector<Val, BV>::value_type
+-sparse_vector<Val, BV>::get(typename sparse_vector<Val, BV>::size_type i) const
++sparse_vector<Val, BV>::get(
++ typename sparse_vector<Val, BV>::size_type i) const BMNOEXCEPT
+ {
+ BM_ASSERT(i < bm::id_max);
+ BM_ASSERT(i < size());
+@@ -1485,7 +1461,7 @@
+ bool b = this->bmatr_.test_4rows(j);
+ if (b)
+ {
+- value_type vm = this->bmatr_.get_half_octet(i, j);
++ value_type vm = (value_type)this->bmatr_.get_half_octet(i, j);
+ v |= vm << j;
+ }
+ } // for j
+@@ -1692,7 +1668,17 @@
+ {
+ if (idx >= this->size_)
+ this->size_ = idx+1;
++ inc_no_null(idx);
++ bvector_type* bv_null = this->get_null_bvect();
++ if (bv_null)
++ bv_null->set_bit_no_check(idx);
++}
+
++//---------------------------------------------------------------------
++
++template<class Val, class BV>
++void sparse_vector<Val, BV>::inc_no_null(size_type idx)
++{
+ for (unsigned i = 0; i < parent_type::sv_value_plains; ++i)
+ {
+ bvector_type* bv = this->get_plain(i);
+@@ -1700,16 +1686,22 @@
+ if (!carry_over)
+ break;
+ }
+- bvector_type* bv_null = this->get_null_bvect();
+- if (bv_null)
+- bv_null->set_bit_no_check(idx);
+ }
+
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-void sparse_vector<Val, BV>::clear() BMNOEXEPT
++void sparse_vector<Val, BV>::inc_no_null(size_type idx, value_type v)
+ {
++ value_type v_prev = get(idx);
++ set_value_no_null(idx, v + v_prev);
++}
++
++//---------------------------------------------------------------------
++
++template<class Val, class BV>
++void sparse_vector<Val, BV>::clear() BMNOEXCEPT
++{
+ parent_type::clear();
+ }
+
+@@ -1716,7 +1708,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-bool sparse_vector<Val, BV>::find_rank(size_type rank, size_type& pos)
++bool sparse_vector<Val, BV>::find_rank(size_type rank, size_type& pos) BMNOEXCEPT
+ {
+ BM_ASSERT(rank);
+ pos = rank - 1;
+@@ -1740,7 +1732,7 @@
+
+ template<class Val, class BV>
+ void sparse_vector<Val, BV>::calc_stat(
+- struct sparse_vector<Val, BV>::statistics* st) const
++ struct sparse_vector<Val, BV>::statistics* st) const BMNOEXCEPT
+ {
+ BM_ASSERT(st);
+ typename bvector_type::statistics stbv;
+@@ -1906,7 +1898,8 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-int sparse_vector<Val, BV>::compare(size_type idx, const value_type val) const
++int sparse_vector<Val, BV>::compare(size_type idx,
++ const value_type val) const BMNOEXCEPT
+ {
+ // TODO: consider bit-by-bit comparison to minimize CPU hit miss in plans get()
+ value_type sv_value = get(idx);
+@@ -1917,7 +1910,7 @@
+
+ template<class Val, class BV>
+ bool sparse_vector<Val, BV>::equal(const sparse_vector<Val, BV>& sv,
+- bm::null_support null_able) const
++ bm::null_support null_able) const BMNOEXCEPT
+ {
+ return parent_type::equal(sv, null_able);
+ }
+@@ -1926,7 +1919,7 @@
+
+ template<class Val, class BV>
+ typename sparse_vector<Val, BV>::const_iterator
+-sparse_vector<Val, BV>::begin() const
++sparse_vector<Val, BV>::begin() const BMNOEXCEPT
+ {
+ typedef typename sparse_vector<Val, BV>::const_iterator it_type;
+ return it_type(this);
+@@ -1936,7 +1929,7 @@
+
+ template<class Val, class BV>
+ void sparse_vector<Val, BV>::set_allocator_pool(
+- typename sparse_vector<Val, BV>::allocator_pool_type* pool_ptr)
++ typename sparse_vector<Val, BV>::allocator_pool_type* pool_ptr) BMNOEXCEPT
+ {
+ this->bmatr_.set_allocator_pool(pool_ptr);
+ }
+@@ -1948,7 +1941,7 @@
+
+
+ template<class Val, class BV>
+-sparse_vector<Val, BV>::const_iterator::const_iterator()
++sparse_vector<Val, BV>::const_iterator::const_iterator() BMNOEXCEPT
+ : sv_(0), pos_(bm::id_max), buf_ptr_(0)
+ {}
+
+@@ -1956,7 +1949,7 @@
+
+ template<class Val, class BV>
+ sparse_vector<Val, BV>::const_iterator::const_iterator(
+- const typename sparse_vector<Val, BV>::const_iterator& it)
++ const typename sparse_vector<Val, BV>::const_iterator& it) BMNOEXCEPT
+ : sv_(it.sv_), pos_(it.pos_), buf_ptr_(0)
+ {}
+
+@@ -1964,7 +1957,8 @@
+
+ template<class Val, class BV>
+ sparse_vector<Val, BV>::const_iterator::const_iterator(
+- const typename sparse_vector<Val, BV>::const_iterator::sparse_vector_type* sv)
++ const typename sparse_vector<Val, BV>::const_iterator::sparse_vector_type* sv
++ ) BMNOEXCEPT
+ : sv_(sv), buf_ptr_(0)
+ {
+ BM_ASSERT(sv_);
+@@ -1976,7 +1970,7 @@
+ template<class Val, class BV>
+ sparse_vector<Val, BV>::const_iterator::const_iterator(
+ const typename sparse_vector<Val, BV>::const_iterator::sparse_vector_type* sv,
+- typename sparse_vector<Val, BV>::size_type pos)
++ typename sparse_vector<Val, BV>::size_type pos) BMNOEXCEPT
+ : sv_(sv), buf_ptr_(0)
+ {
+ BM_ASSERT(sv_);
+@@ -1986,7 +1980,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-void sparse_vector<Val, BV>::const_iterator::go_to(size_type pos)
++void sparse_vector<Val, BV>::const_iterator::go_to(size_type pos) BMNOEXCEPT
+ {
+ pos_ = (!sv_ || pos >= sv_->size()) ? bm::id_max : pos;
+ buf_ptr_ = 0;
+@@ -1995,22 +1989,23 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-void sparse_vector<Val, BV>::const_iterator::advance()
++bool sparse_vector<Val, BV>::const_iterator::advance() BMNOEXCEPT
+ {
+ if (pos_ == bm::id_max) // nothing to do, we are at the end
+- return;
++ return false;
+ ++pos_;
+ if (pos_ >= sv_->size())
++ {
+ this->invalidate();
+- else
++ return false;
++ }
++ if (buf_ptr_)
+ {
+- if (buf_ptr_)
+- {
+- ++buf_ptr_;
+- if (buf_ptr_ - ((value_type*)buffer_.data()) >= n_buf_size)
+- buf_ptr_ = 0;
+- }
++ ++buf_ptr_;
++ if (buf_ptr_ - ((value_type*)buffer_.data()) >= n_buf_size)
++ buf_ptr_ = 0;
+ }
++ return true;
+ }
+
+ //---------------------------------------------------------------------
+@@ -2026,7 +2021,7 @@
+ {
+ buffer_.reserve(n_buf_size * sizeof(value_type));
+ buf_ptr_ = (value_type*)(buffer_.data());
+- sv_->extract(buf_ptr_, n_buf_size, pos_, true, &pool_);
++ sv_->extract(buf_ptr_, n_buf_size, pos_, true);
+ }
+ v = *buf_ptr_;
+ return v;
+@@ -2035,7 +2030,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-void sparse_vector<Val, BV>::const_iterator::skip_zero_values()
++void sparse_vector<Val, BV>::const_iterator::skip_zero_values() BMNOEXCEPT
+ {
+ value_type v = value();
+ if (buf_ptr_)
+@@ -2063,7 +2058,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV>
+-bool sparse_vector<Val, BV>::const_iterator::is_null() const
++bool sparse_vector<Val, BV>::const_iterator::is_null() const BMNOEXCEPT
+ {
+ return sv_->is_null(pos_);
+ }
+Index: c++/include/util/bitset/bmsparsevec_algo.h
+===================================================================
+--- a/c++/include/util/bitset/bmsparsevec_algo.h (revision 90103)
++++ b/c++/include/util/bitset/bmsparsevec_algo.h (revision 90104)
+@@ -18,7 +18,7 @@
+ For more information please visit: http://bitmagic.io
+ */
+ /*! \file bmsparsevec_algo.h
+- \brief Algorithms for sparse_vector<>
++ \brief Algorithms for bm::sparse_vector
+ */
+
+ #ifndef BM__H__INCLUDED__
+@@ -31,6 +31,7 @@
+ #include "bmsparsevec.h"
+ #include "bmaggregator.h"
+ #include "bmbuffer.h"
++#include "bmalgo.h"
+ #include "bmdef.h"
+
+ #ifdef _MSC_VER
+@@ -231,7 +232,7 @@
+ } // null_proc
+ }
+
+- for (unsigned i = 0; mismatch & (i < plains1); ++i)
++ for (unsigned i = 0; mismatch && (i < plains1); ++i)
+ {
+ typename SV::bvector_type_const_ptr bv1 = sv1.get_plain(i);
+ typename SV::bvector_type_const_ptr bv2 = sv2.get_plain(i);
+@@ -503,7 +504,7 @@
+ /**
+ \brief reset sparse vector binding
+ */
+- void reset_binding();
++ void reset_binding() BMNOEXCEPT;
+
+ /**
+ \brief find all sparse vector elements EQ to search value
+@@ -709,7 +710,7 @@
+ int compare_str(const SV& sv, size_type idx, const value_type* str);
+
+ /// compare sv[idx] with input value
+- int compare(const SV& sv, size_type idx, const value_type val);
++ int compare(const SV& sv, size_type idx, const value_type val) BMNOEXCEPT;
+
+ protected:
+ sparse_vector_scanner(const sparse_vector_scanner&) = delete;
+@@ -1047,7 +1048,7 @@
+ {
+ sv_ptr_->gather(&gb_->buffer_[0], &gb_->gather_idx_[0], buf_cnt, BM_SORTED_UNIFORM);
+ bv_out.set(&gb_->buffer_[0], buf_cnt, BM_SORTED);
+- buf_cnt ^= buf_cnt;
++ buf_cnt = 0;
+ }
+ nb_old = nb;
+ gb_->gather_idx_[buf_cnt++] = idx;
+@@ -1061,7 +1062,7 @@
+ {
+ sv_ptr_->gather(&gb_->buffer_[0], &gb_->gather_idx_[0], buf_cnt, BM_SORTED_UNIFORM);
+ bv_out.set(&gb_->buffer_[0], buf_cnt, bm::BM_SORTED);
+- buf_cnt ^= buf_cnt;
++ buf_cnt = 0;
+ }
+ } // for en
+ if (buf_cnt)
+@@ -1157,7 +1158,7 @@
+ //----------------------------------------------------------------------------
+
+ template<typename SV>
+-void sparse_vector_scanner<SV>::reset_binding()
++void sparse_vector_scanner<SV>::reset_binding() BMNOEXCEPT
+ {
+ bound_sv_ = 0;
+ effective_str_max_ = 0;
+@@ -2020,7 +2021,7 @@
+ template<typename SV>
+ int sparse_vector_scanner<SV>::compare(const SV& sv,
+ size_type idx,
+- const value_type val)
++ const value_type val) BMNOEXCEPT
+ {
+ // TODO: implement sentinel elements cache (similar to compare_str())
+ return sv.compare(idx, val);
+Index: c++/include/util/bitset/bmdef.h
+===================================================================
+--- a/c++/include/util/bitset/bmdef.h (revision 90103)
++++ b/c++/include/util/bitset/bmdef.h (revision 90104)
+@@ -72,10 +72,15 @@
+ // cxx11 features
+ //
+ #if defined(BM_NO_CXX11) || (defined(_MSC_VER) && _MSC_VER < 1900)
+-# define BMNOEXEPT
++# define BMNOEXCEPT
++# define BMNOEXCEPT2
+ #else
+-# ifndef BMNOEXEPT
+-# define BMNOEXEPT noexcept
++# ifndef BMNOEXCEPT
++# define BMNOEXCEPT noexcept
++#if defined(__EMSCRIPTEN__)
++#else
++# define BMNOEXCEPT2
++#endif
+ # endif
+ #endif
+
+@@ -84,19 +89,17 @@
+ // detects use of EMSCRIPTEN engine and tweaks settings
+ // WebAssemply compiles into 32-bit ptr yet 64-bit wordsize use GCC extensions
+ //
++// BMNOEXCEPT2 is to declare "noexcept" for WebAsm only where needed
++// and silence GCC warnings where not
+ #if defined(__EMSCRIPTEN__)
+ # define BM64OPT
+ # define BM_USE_GCC_BUILD
+-//# define BM_FORBID_UNALIGNED_ACCESS
++# define BMNOEXCEPT2 noexcept
++#else
++# define BMNOEXCEPT2
+ #endif
+
+-// disable 'register' keyword, which is obsolete in C++11
+-//
+-#ifndef BMREGISTER
+-# define BMREGISTER
+-#endif
+
+-
+ // Enable MSVC 8.0 (2005) specific optimization options
+ //
+ #if(_MSC_VER >= 1400)
+Index: c++/include/util/bitset/bmbuffer.h
+===================================================================
+--- a/c++/include/util/bitset/bmbuffer.h (revision 90103)
++++ b/c++/include/util/bitset/bmbuffer.h (revision 90104)
+@@ -33,35 +33,35 @@
+ class byte_buffer_ptr
+ {
+ public:
+- byte_buffer_ptr()
++ byte_buffer_ptr() BMNOEXCEPT
+ : byte_buf_(0), size_(0)
+ {}
+
+ /// construct byte buffer pointer
+ ///
+- byte_buffer_ptr(unsigned char* in_buf, size_t in_size)
++ byte_buffer_ptr(unsigned char* in_buf, size_t in_size) BMNOEXCEPT
+ : byte_buf_(in_buf), size_(in_size)
+ {}
+
+ /// Set buffer pointer
+- void set_buf(unsigned char* in_buf, size_t in_size)
++ void set_buf(unsigned char* in_buf, size_t in_size) BMNOEXCEPT
+ {
+ byte_buf_ = in_buf; size_= in_size;
+ }
+
+ /// Get buffer size
+- size_t size() const { return size_; }
++ size_t size() const BMNOEXCEPT { return size_; }
+
+ /// Get read access to buffer memory
+- const unsigned char* buf() const { return byte_buf_; }
++ const unsigned char* buf() const BMNOEXCEPT { return byte_buf_; }
+
+ /// Get write access to buffer memory
+- unsigned char* data() { return byte_buf_; }
++ unsigned char* data() BMNOEXCEPT { return byte_buf_; }
+
+- bool operator==(const byte_buffer_ptr& lhs) const { return equal(lhs); }
++ bool operator==(const byte_buffer_ptr& lhs) const BMNOEXCEPT { return equal(lhs); }
+
+ /// return true if content and size is the same
+- bool equal(const byte_buffer_ptr& lhs) const
++ bool equal(const byte_buffer_ptr& lhs) const BMNOEXCEPT
+ {
+ if (this == &lhs)
+ return true;
+@@ -89,7 +89,7 @@
+ typedef size_t size_type;
+
+ public:
+- byte_buffer() : capacity_(0), alloc_factor_(0)
++ byte_buffer() BMNOEXCEPT : capacity_(0), alloc_factor_(0)
+ {}
+
+ byte_buffer(size_t in_capacity)
+@@ -98,7 +98,7 @@
+ allocate(in_capacity);
+ }
+
+- byte_buffer(const byte_buffer& lhs)
++ byte_buffer(const byte_buffer& lhs) BMNOEXCEPT
+ {
+ byte_buf_ = 0;
+ size_ = capacity_ = alloc_factor_ = 0;
+@@ -110,7 +110,7 @@
+
+ #ifndef BM_NO_CXX11
+ /// Move constructor
+- byte_buffer(byte_buffer&& in_buf) BMNOEXEPT
++ byte_buffer(byte_buffer&& in_buf) BMNOEXCEPT
+ {
+ byte_buf_ = in_buf.byte_buf_;
+ in_buf.byte_buf_ = 0;
+@@ -121,7 +121,7 @@
+ }
+
+ /// Move assignment operator
+- byte_buffer& operator=(byte_buffer&& lhs) BMNOEXEPT
++ byte_buffer& operator=(byte_buffer&& lhs) BMNOEXCEPT
+ {
+ move_from(lhs);
+ return *this;
+@@ -128,7 +128,7 @@
+ }
+ #endif
+
+- byte_buffer& operator=(const byte_buffer& lhs)
++ byte_buffer& operator=(const byte_buffer& lhs) BMNOEXCEPT
+ {
+ if (this == &lhs)
+ return *this;
+@@ -143,7 +143,7 @@
+ }
+
+ /// swap content with another buffer
+- void swap(byte_buffer& other) BMNOEXEPT
++ void swap(byte_buffer& other) BMNOEXCEPT
+ {
+ if (this == &other)
+ return;
+@@ -157,7 +157,7 @@
+ }
+
+ /// take/move content from another buffer
+- void move_from(byte_buffer& other) BMNOEXEPT
++ void move_from(byte_buffer& other) BMNOEXCEPT
+ {
+ if (this == &other)
+ return;
+@@ -190,7 +190,7 @@
+
+
+ /// Get buffer capacity
+- size_t capacity() const { return capacity_; }
++ size_t capacity() const BMNOEXCEPT { return capacity_; }
+
+ /// adjust current size (buffer content preserved)
+ void resize(size_t new_size, bool copy_content = true)
+@@ -213,6 +213,11 @@
+ {
+ if (new_capacity <= capacity_)
+ return;
++ if (!capacity_)
++ {
++ allocate(new_capacity);
++ return;
++ }
+
+ byte_buffer tmp_buffer(new_capacity);
+ tmp_buffer = *this;
+@@ -247,7 +252,7 @@
+ }
+
+ /// return memory consumtion
+- size_t mem_usage() const
++ size_t mem_usage() const BMNOEXCEPT
+ {
+ return sizeof(capacity_) + sizeof(alloc_factor_) +
+ capacity();
+@@ -258,7 +263,7 @@
+ void set_buf(unsigned char* buf, size_t size);
+
+ /// compute number of words for the desired capacity
+- static size_t compute_words(size_t capacity)
++ static size_t compute_words(size_t capacity) BMNOEXCEPT
+ {
+ size_t words = (capacity / sizeof(bm::word_t))+1;
+ return words;
+@@ -307,10 +312,8 @@
+ typedef Val value_type;
+ typedef typename buffer_type::size_type size_type;
+
+- heap_vector()
+- : buffer_()
+- {
+- }
++ heap_vector() BMNOEXCEPT : buffer_()
++ {}
+
+ heap_vector(const heap_vector<Val, BVAlloc, trivial_type>& hv)
+ : buffer_()
+@@ -359,14 +362,14 @@
+ }
+ }
+
+- value_type* data() { return (value_type*) buffer_.data(); }
++ value_type* data() BMNOEXCEPT { return (value_type*) buffer_.data(); }
+
+- void swap(heap_vector<Val, BVAlloc, trivial_type>& other) BMNOEXEPT
++ void swap(heap_vector<Val, BVAlloc, trivial_type>& other) BMNOEXCEPT
+ {
+ buffer_.swap(other.buffer_);
+ }
+
+- const value_type& operator[](size_type pos) const
++ const value_type& operator[](size_type pos) const BMNOEXCEPT
+ {
+ BM_ASSERT(pos < size());
+ size_type v_size = value_size();
+@@ -374,7 +377,7 @@
+ return *reinterpret_cast<const value_type*>(p);
+ }
+
+- value_type& operator[](size_type pos)
++ value_type& operator[](size_type pos) BMNOEXCEPT
+ {
+ BM_ASSERT(pos < size());
+ size_type v_size = value_size();
+@@ -393,22 +396,22 @@
+ return *reinterpret_cast<value_type*>(p);
+ }
+
+- const value_type* begin() const
++ const value_type* begin() const BMNOEXCEPT
+ {
+ return (const value_type*) buffer_.buf();
+ }
+
+- size_type size() const
++ size_type size() const BMNOEXCEPT
+ {
+ return buffer_.size() / value_size();
+ }
+
+- size_type capacity() const
++ size_type capacity() const BMNOEXCEPT
+ {
+ return buffer_.capacity() / value_size();
+ }
+
+- bool empty() const
++ bool empty() const BMNOEXCEPT
+ {
+ return (buffer_.size() == 0);
+ }
+@@ -492,7 +495,7 @@
+ buffer_.resize(new_size * v_size);
+ }
+
+- static size_type value_size()
++ static size_type value_size() BMNOEXCEPT
+ {
+ size_type size_of = sizeof(value_type);
+ return size_of;
+@@ -537,13 +540,13 @@
+ row_size_in_bytes = sizeof(value_type) * COLS
+ };
+
+- static size_t rows() { return ROWS; }
+- static size_t cols() { return COLS; }
++ static size_t rows() BMNOEXCEPT { return ROWS; }
++ static size_t cols() BMNOEXCEPT { return COLS; }
+
+ /**
+ By default object is constructed NOT allocated.
+ */
+- heap_matrix()
++ heap_matrix() BMNOEXCEPT
+ : buffer_()
+ {}
+
+@@ -565,12 +568,12 @@
+ buffer_.resize(size_in_bytes);
+ }
+
+- bool is_init() const
++ bool is_init() const BMNOEXCEPT
+ {
+ return buffer_.size();
+ }
+
+- value_type get(size_type row_idx, size_type col_idx) const
++ value_type get(size_type row_idx, size_type col_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(row_idx < ROWS);
+ BM_ASSERT(col_idx < COLS);
+@@ -579,7 +582,7 @@
+ return ((const value_type*)buf)[col_idx];
+ }
+
+- const value_type* row(size_type row_idx) const
++ const value_type* row(size_type row_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(row_idx < ROWS);
+ BM_ASSERT(buffer_.size());
+@@ -587,7 +590,7 @@
+ return (const value_type*) buf;
+ }
+
+- value_type* row(size_type row_idx)
++ value_type* row(size_type row_idx) BMNOEXCEPT
+ {
+ BM_ASSERT(row_idx < ROWS);
+ BM_ASSERT(buffer_.size());
+@@ -597,7 +600,7 @@
+ }
+
+ /** memset all buffer to all zeroes */
+- void set_zero()
++ void set_zero() BMNOEXCEPT
+ {
+ ::memset(buffer_.data(), 0, size_in_bytes);
+ }
+@@ -604,7 +607,7 @@
+
+ /*! swap content
+ */
+- void swap(heap_matrix& other) BMNOEXEPT
++ void swap(heap_matrix& other) BMNOEXCEPT
+ {
+ buffer_.swap(other.buffer_);
+ }
+@@ -611,7 +614,7 @@
+
+ /*! move content from another matrix
+ */
+- void move_from(heap_matrix& other) BMNOEXEPT
++ void move_from(heap_matrix& other) BMNOEXCEPT
+ {
+ buffer_.move_from(other.buffer_);
+ }
+@@ -624,7 +627,7 @@
+ /*! remapping: vect[idx] = matrix[idx, vect[idx] ]
+ */
+ template<typename VECT_TYPE>
+- void remap(VECT_TYPE* vect, size_type size) const
++ void remap(VECT_TYPE* vect, size_type size) const BMNOEXCEPT
+ {
+ BM_ASSERT(size <= ROWS);
+ const unsigned char* buf = buffer_.buf();
+@@ -641,7 +644,7 @@
+ /*! zero-terminated remap: vect[idx] = matrix[idx, vect[idx] ]
+ */
+ template<typename VECT_TYPE>
+- void remapz(VECT_TYPE* vect) const
++ void remapz(VECT_TYPE* vect) const BMNOEXCEPT
+ {
+ const unsigned char* buf = buffer_.buf();
+ for (size_type i = 0; i < ROWS; ++i)
+@@ -704,12 +707,12 @@
+ buffer_.resize(size_in_bytes());
+ }
+
+- bool is_init() const
++ bool is_init() const BMNOEXCEPT
+ {
+ return buffer_.size();
+ }
+
+- const value_type* row(size_type row_idx) const
++ const value_type* row(size_type row_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(row_idx < rows_);
+ BM_ASSERT(buffer_.size());
+@@ -717,7 +720,7 @@
+ return (const value_type*) buf;
+ }
+
+- value_type* row(size_type row_idx)
++ value_type* row(size_type row_idx) BMNOEXCEPT
+ {
+ BM_ASSERT(row_idx < rows_);
+ BM_ASSERT(buffer_.size());
+@@ -726,8 +729,24 @@
+ return (value_type*)buf;
+ }
+
++ value_type get(size_type row_idx, size_type col_idx) BMNOEXCEPT
++ {
++ BM_ASSERT(row_idx < rows_);
++ BM_ASSERT(col_idx < cols_);
++ const value_type* r = row(row_idx);
++ return r[col_idx];
++ }
++
++ void set(size_type row_idx, size_type col_idx, value_type v) BMNOEXCEPT
++ {
++ BM_ASSERT(row_idx < rows_);
++ BM_ASSERT(col_idx < cols_);
++ value_type* r = row(row_idx);
++ r[col_idx] = v;
++ }
++
+ /** memset all buffer to all zeroes */
+- void set_zero()
++ void set_zero() BMNOEXCEPT
+ {
+ ::memset(buffer_.data(), 0, size_in_bytes());
+ }
+@@ -734,7 +753,7 @@
+
+ /*! swap content
+ */
+- void swap(dynamic_heap_matrix& other) BMNOEXEPT
++ void swap(dynamic_heap_matrix& other) BMNOEXCEPT
+ {
+ bm::xor_swap(rows_, other.rows_);
+ bm::xor_swap(cols_, other.cols_);
+@@ -743,7 +762,7 @@
+
+ /*! move content from another matrix
+ */
+- void move_from(dynamic_heap_matrix& other) BMNOEXEPT
++ void move_from(dynamic_heap_matrix& other) BMNOEXCEPT
+ {
+ rows_ = other.rows_;
+ cols_ = other.cols_;
+@@ -751,16 +770,46 @@
+ }
+
+ /** Get low-level buffer access */
+- buffer_type& get_buffer() { return buffer_; }
++ buffer_type& get_buffer() BMNOEXCEPT { return buffer_; }
+ /** Get low-level buffer access */
+- const buffer_type& get_buffer() const { return buffer_; }
++ const buffer_type& get_buffer() const BMNOEXCEPT { return buffer_; }
+
++ /**
++ copy values of the left triangle elements to the right triangle
++ (operation specific to matrices with symmetric distances)
++ */
++ void replicate_triange() BMNOEXCEPT
++ {
++ BM_ASSERT(rows_ == cols_);
++ for (size_type i = 0; i < rows_; ++i)
++ {
++ for (size_type j = i+1; j < cols_; ++j)
++ {
++ set(i, j, get(j, i));
++ }
++ }
++ }
++ /**
++ Sum of row elements
++ */
++ template<typename ACC>
++ void sum(ACC& acc, size_type row_idx) const BMNOEXCEPT
++ {
++ BM_ASSERT(row_idx < rows_);
++ ACC s = 0;
++ const value_type* r = row(row_idx);
++ for (size_type j = 0; j < cols_; ++j)
++ s += r[j];
++ acc = s;
++ }
++
+ protected:
+- size_type size_in_bytes() const
++
++ size_type size_in_bytes() const BMNOEXCEPT
+ {
+ return sizeof(value_type) * cols_ * rows_;
+ }
+- size_type row_size_in_bytes() const
++ size_type row_size_in_bytes() const BMNOEXCEPT
+ {
+ return sizeof(value_type) * cols_;
+ }
+Index: c++/include/util/bitset/bmavx2.h
+===================================================================
+--- a/c++/include/util/bitset/bmavx2.h (revision 90103)
++++ b/c++/include/util/bitset/bmavx2.h (revision 90104)
+@@ -223,6 +223,66 @@
+ }
+
+ /*!
++ @brief Calculate population count based on digest
++
++ @return popcnt
++ @ingroup AVX2
++*/
++inline
++bm::id_t avx2_bit_block_count(const bm::word_t* const block,
++ bm::id64_t digest)
++{
++ bm::id_t count = 0;
++ bm::id64_t* cnt64;
++ BM_AVX2_POPCNT_PROLOG;
++ __m256i cnt = _mm256_setzero_si256();
++ while (digest)
++ {
++ bm::id64_t t = bm::bmi_blsi_u64(digest); // d & -d;
++
++ unsigned wave = _mm_popcnt_u64(t - 1);
++ unsigned off = wave * bm::set_block_digest_wave_size;
++
++ const __m256i* BMRESTRICT wave_src = (__m256i*)&block[off];
++
++ __m256i m1A, m1B, m1C, m1D;
++ m1A = _mm256_load_si256(wave_src);
++ m1B = _mm256_load_si256(wave_src+1);
++ if (!_mm256_testz_si256(m1A, m1A))
++ {
++ BM_AVX2_BIT_COUNT(bc, m1A)
++ cnt = _mm256_add_epi64(cnt, bc);
++ }
++ if (!_mm256_testz_si256(m1B, m1B))
++ {
++ BM_AVX2_BIT_COUNT(bc, m1B)
++ cnt = _mm256_add_epi64(cnt, bc);
++ }
++
++ m1C = _mm256_load_si256(wave_src+2);
++ m1D = _mm256_load_si256(wave_src+3);
++ if (!_mm256_testz_si256(m1C, m1C))
++ {
++ BM_AVX2_BIT_COUNT(bc, m1C)
++ cnt = _mm256_add_epi64(cnt, bc);
++ }
++ if (!_mm256_testz_si256(m1D, m1D))
++ {
++ BM_AVX2_BIT_COUNT(bc, m1D)
++ cnt = _mm256_add_epi64(cnt, bc);
++ }
++
++ digest = bm::bmi_bslr_u64(digest); // d &= d - 1;
++ } // while
++ cnt64 = (bm::id64_t*)&cnt;
++ count = (unsigned)(cnt64[0] + cnt64[1] + cnt64[2] + cnt64[3]);
++ return count;
++
++}
++
++
++
++/*!
+ @brief AND bit count for two aligned bit-blocks
+ @ingroup AVX2
+ */
+@@ -1368,6 +1428,20 @@
+ }
+
+ /*!
++ @brief check if wave of pointers is all 0xFFF
++ @ingroup AVX2
++*/
++BMFORCEINLINE
++bool avx2_test_all_one_wave(const void* ptr)
++{
++ __m256i maskF = _mm256_set1_epi32(~0u); // braodcast 0xFF
++ __m256i wcmpA = _mm256_cmpeq_epi8(_mm256_loadu_si256((__m256i*)ptr), maskF); // (w0 == maskF)
++ unsigned maskA = unsigned(_mm256_movemask_epi8(wcmpA));
++ return (maskA == ~0u);
++}
++
++
++/*!
+ @brief check if wave of pointers is all NULL
+ @ingroup AVX2
+ */
+@@ -2474,16 +2548,24 @@
+ }
+
+ /**
+- hybrid binary search, starts as binary, then switches to scan
+-
++ Hybrid binary search, starts as binary, then switches to scan
++
+ NOTE: AVX code uses _mm256_subs_epu16 - saturated substraction
+ which gives 0 if A-B=0 if A < B (not negative a value).
+-
++
++ \param buf - GAP buffer pointer.
++ \param pos - index of the element.
++ \param is_set - output. GAP value (0 or 1).
++ \return GAP index.
++
+ @ingroup AVX2
+ */
+ inline
+-unsigned avx2_gap_test(const unsigned short* buf, unsigned pos)
++unsigned avx2_gap_bfind(const unsigned short* BMRESTRICT buf,
++ unsigned pos, unsigned* BMRESTRICT is_set)
+ {
++ BM_ASSERT(is_set);
++
+ const unsigned linear_cutoff = 48;
+ const unsigned unroll_factor = 16;
+
+@@ -2500,8 +2582,9 @@
+ {
+ if (buf[start] >= pos)
+ {
+- res = ((*buf) & 1) ^ ((--start) & 1);
+- return res;
++ res = ((*buf) & 1) ^ ((start-1) & 1);
++ *is_set = res;
++ return start;
+ }
+ } // for
+ BM_ASSERT(0);
+@@ -2516,7 +2599,7 @@
+ // but stay within allocated block memory
+ //
+ dsize = arr_end - start;
+-
++
+ __m256i mZ = _mm256_setzero_si256();
+ __m256i mPos = _mm256_set1_epi16((unsigned short)pos);
+ __m256i vect16, mSub, mge_mask;
+@@ -2532,8 +2615,9 @@
+ {
+ int lz = _tzcnt_u32(mask) / 2;
+ start += lz;
+- res = ((*buf) & 1) ^ ((--start) & 1);
+- return res;
++ res = ((*buf) & 1) ^ ((start-1) & 1);
++ *is_set = res;
++ return start;
+ }
+ } // for k
+ unsigned tail = unroll_factor - (end - start);
+@@ -2544,22 +2628,19 @@
+ mSub = _mm256_subs_epu16(mPos, vect16);
+ mge_mask = _mm256_cmpeq_epi16(mSub, mZ);
+ int mask = _mm256_movemask_epi8(mge_mask);
+- BM_ASSERT(mask);
+- // TODO: if should be not needed, cleanup
+- if (mask)
+- {
+- int lz = _tzcnt_u32(mask) / 2;
+- start += lz;
+- res = ((*buf) & 1) ^ ((--start) & 1);
+- return res;
+- }
+- start += unroll_factor; // remove with if when sure
++ BM_ASSERT(mask); // the rersult MUST be here at this point
++
++ int lz = _tzcnt_u32(mask) / 2;
++ start += lz;
++ res = ((*buf) & 1) ^ ((start-1) & 1);
++ *is_set = res;
++ return start;
+ }
+ for (; start < end; ++start)
+ {
+ if (buf[start] >= pos)
+ break;
+- }
++ } // for
+ break;
+ }
+ unsigned curr = (start + end) >> 1;
+@@ -2568,11 +2649,25 @@
+ else
+ end = curr;
+ } // while
+- res = ((*buf) & 1) ^ ((--start) & 1);
+- return res;
++ res = ((*buf) & 1) ^ ((start-1) & 1);
++ *is_set = res;
++ return start;
+ }
+
++
+ /**
++ Hybrid binary search, starts as binary, then switches to scan
++ @ingroup AVX2
++*/
++inline
++unsigned avx2_gap_test(const unsigned short* BMRESTRICT buf, unsigned pos)
++{
++ unsigned is_set;
++ bm::avx2_gap_bfind(buf, pos, &is_set);
++ return is_set;
++}
++
++/**
+ lower bound (great or equal) linear scan in ascending order sorted array
+ @ingroup AVX2
+ \internal
+@@ -3024,6 +3119,13 @@
+ #define VECT_BIT_BLOCK_XOR(t, src, src_xor, d) \
+ avx2_bit_block_xor(t, src, src_xor, d)
+
++#define VECT_GAP_BFIND(buf, pos, is_set) \
++ avx2_gap_bfind(buf, pos, is_set)
++
++#define VECT_BIT_COUNT_DIGEST(blk, d) \
++ avx2_bit_block_count(blk, d)
++
++
+ } // namespace
+
+
+Index: c++/include/util/bitset/bmrandom.h
+===================================================================
+--- a/c++/include/util/bitset/bmrandom.h (revision 90103)
++++ b/c++/include/util/bitset/bmrandom.h (revision 90104)
+@@ -97,7 +97,7 @@
+ unsigned process_word(bm::word_t* blk_out,
+ const bm::word_t* blk_src,
+ unsigned nword,
+- unsigned take_count);
++ unsigned take_count) BMNOEXCEPT;
+
+ static
+ void get_random_array(bm::word_t* blk_out,
+@@ -106,7 +106,7 @@
+ unsigned count);
+ static
+ unsigned compute_take_count(unsigned bc,
+- size_type in_count, size_type sample_count);
++ size_type in_count, size_type sample_count) BMNOEXCEPT;
+
+
+ private:
+@@ -357,9 +357,10 @@
+ }
+
+ template<class BV>
+-unsigned random_subset<BV>::compute_take_count(unsigned bc,
+- size_type in_count,
+- size_type sample_count)
++unsigned random_subset<BV>::compute_take_count(
++ unsigned bc,
++ size_type in_count,
++ size_type sample_count) BMNOEXCEPT
+ {
+ float block_percent = float(bc) / float(in_count);
+ float bits_to_take = float(sample_count) * block_percent;
+@@ -404,7 +405,7 @@
+ }
+ // now transform vacant bits to array, then pick random elements
+ //
+- unsigned arr_len = bit_convert_to_arr(bit_list_,
++ unsigned arr_len = bm::bit_convert_to_arr(bit_list_,
+ sub_block_,
+ bm::gap_max_bits,
+ bm::gap_max_bits,
+@@ -418,7 +419,7 @@
+ unsigned random_subset<BV>::process_word(bm::word_t* blk_out,
+ const bm::word_t* blk_src,
+ unsigned nword,
+- unsigned take_count)
++ unsigned take_count) BMNOEXCEPT
+ {
+ unsigned new_bits, mask;
+ do
+Index: c++/include/util/bitset/bmtimer.h
+===================================================================
+--- a/c++/include/util/bitset/bmtimer.h (revision 90103)
++++ b/c++/include/util/bitset/bmtimer.h (revision 90104)
+@@ -46,7 +46,7 @@
+ std::chrono::duration<double, std::milli> duration;
+ unsigned repeats;
+
+- statistics() : repeats(1) {}
++ statistics() : duration(0), repeats(1) {}
+
+ statistics(std::chrono::duration<double, std::milli> d, unsigned r)
+ : duration(d), repeats(r)
+@@ -147,7 +147,13 @@
+ if (ms > 1000)
+ {
+ double sec = ms / 1000;
+- std::cout << it->first << "; " << std::setprecision(4) << sec << " sec" << std::endl;
++ if (sec > 60)
++ {
++ double min = sec / 60;
++ std::cout << it->first << "; " << std::setprecision(4) << min << " min" << std::endl;
++ }
++ else
++ std::cout << it->first << "; " << std::setprecision(4) << sec << " sec" << std::endl;
+ }
+ else
+ std::cout << it->first << "; " << it->second.duration.count() << " ms" << std::endl;
+Index: c++/include/util/bitset/bmalgo.h
+===================================================================
+--- a/c++/include/util/bitset/bmalgo.h (revision 90103)
++++ b/c++/include/util/bitset/bmalgo.h (revision 90104)
+@@ -46,7 +46,7 @@
+ \ingroup setalgo
+ */
+ template<class BV>
+-typename BV::size_type count_and(const BV& bv1, const BV& bv2)
++typename BV::size_type count_and(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ return bm::distance_and_operation(bv1, bv2);
+ }
+@@ -59,7 +59,7 @@
+ \ingroup setalgo
+ */
+ template<class BV>
+-typename BV::size_type any_and(const BV& bv1, const BV& bv2)
++typename BV::size_type any_and(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(bm::COUNT_AND);
+
+@@ -78,7 +78,7 @@
+ */
+ template<class BV>
+ bm::distance_metric_descriptor::size_type
+-count_xor(const BV& bv1, const BV& bv2)
++count_xor(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(bm::COUNT_XOR);
+
+@@ -94,7 +94,7 @@
+ \ingroup setalgo
+ */
+ template<class BV>
+-typename BV::size_type any_xor(const BV& bv1, const BV& bv2)
++typename BV::size_type any_xor(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(bm::COUNT_XOR);
+
+@@ -112,7 +112,7 @@
+ \ingroup setalgo
+ */
+ template<class BV>
+-typename BV::size_type count_sub(const BV& bv1, const BV& bv2)
++typename BV::size_type count_sub(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(bm::COUNT_SUB_AB);
+
+@@ -129,7 +129,7 @@
+ \ingroup setalgo
+ */
+ template<class BV>
+-typename BV::size_type any_sub(const BV& bv1, const BV& bv2)
++typename BV::size_type any_sub(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(bm::COUNT_SUB_AB);
+
+@@ -146,7 +146,7 @@
+ \ingroup setalgo
+ */
+ template<class BV>
+-typename BV::size_type count_or(const BV& bv1, const BV& bv2)
++typename BV::size_type count_or(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(bm::COUNT_OR);
+
+@@ -162,7 +162,7 @@
+ \ingroup setalgo
+ */
+ template<class BV>
+-typename BV::size_type any_or(const BV& bv1, const BV& bv2)
++typename BV::size_type any_or(const BV& bv1, const BV& bv2) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(bm::COUNT_OR);
+
+@@ -173,18 +173,18 @@
+
+
+ #define BM_SCANNER_OP(x) \
+- if (0 != (block = blk_blk[j+x])) \
++if (0 != (block = blk_blk[j+x])) \
++{ \
++ if (BM_IS_GAP(block)) \
+ { \
+- if (BM_IS_GAP(block)) \
+- { \
+- bm::for_each_gap_blk(BMGAP_PTR(block), (r+j+x)*bm::bits_in_block,\
+- bit_functor); \
+- } \
+- else \
+- { \
+- bm::for_each_bit_blk(block, (r+j+x)*bm::bits_in_block,bit_functor); \
+- } \
+- }
++ bm::for_each_gap_blk(BMGAP_PTR(block), (r+j+x)*bm::bits_in_block,\
++ bit_functor); \
++ } \
++ else \
++ { \
++ bm::for_each_bit_blk(block, (r+j+x)*bm::bits_in_block,bit_functor); \
++ } \
++}
+
+
+ /**
+@@ -191,9 +191,10 @@
+ @brief bit-vector visitor scanner to traverse each 1 bit using C++ visitor
+
+ @param bv - bit vector to scan
+- @param bit_functor (should support add_bits() and add_range() methods
++ @param bit_functor - visitor: should support add_bits(), add_range()
+
+ \ingroup setalgo
++ @sa for_each_bit_range visit_each_bit
+ */
+ template<class BV, class Func>
+ void for_each_bit(const BV& bv,
+@@ -248,10 +249,100 @@
+ } // for i
+ }
+
++/**
++ @brief bit-vector range visitor to traverse each 1 bit
++
++ @param bv - bit vector to scan
++ @param right - start of closed interval [from..to]
++ @param left - end of close interval [from..to]
++ @param bit_functor - visitor: should support add_bits(), add_range()
++
++ \ingroup setalgo
++ @sa for_each_bit
++*/
++template<class BV, class Func>
++void for_each_bit_range(const BV& bv,
++ typename BV::size_type left,
++ typename BV::size_type right,
++ Func& bit_functor)
++{
++ if (left > right)
++ bm::xor_swap(left, right);
++ if (right == bm::id_max)
++ --right;
++ BM_ASSERT(left < bm::id_max && right < bm::id_max);
++
++ bm::for_each_bit_range_no_check(bv, left, right, bit_functor);
++}
++
++
+ #undef BM_SCANNER_OP
+
++
++/// private adaptor for C-style callbacks
++///
++/// @internal
++///
++template <class VCBT, class size_type>
++struct bit_vitor_callback_adaptor
++{
++ typedef VCBT bit_visitor_callback_type;
++
++ bit_vitor_callback_adaptor(void* h, bit_visitor_callback_type cb_func)
++ : handle_(h), func_(cb_func)
++ {}
++
++ void add_bits(size_type offset, const unsigned char* bits, unsigned size)
++ {
++ for (unsigned i = 0; i < size; ++i)
++ func_(handle_, offset + bits[i]);
++ }
++ void add_range(size_type offset, size_type size)
++ {
++ for (size_type i = 0; i < size; ++i)
++ func_(handle_, offset + i);
++ }
++
++ void* handle_;
++ bit_visitor_callback_type func_;
++};
++
++
++/// Functor for bit-copy (for testing)
++///
++/// @internal
++///
++template <class BV>
++struct bit_vistor_copy_functor
++{
++ typedef typename BV::size_type size_type;
++
++ bit_vistor_copy_functor(BV& bv)
++ : bv_(bv)
++ {
++ bv_.init();
++ }
++
++ void add_bits(size_type offset, const unsigned char* bits, unsigned size)
++ {
++ BM_ASSERT(size);
++ for (unsigned i = 0; i < size; ++i)
++ bv_.set_bit_no_check(offset + bits[i]);
++ }
++ void add_range(size_type offset, size_type size)
++ {
++ BM_ASSERT(size);
++ bv_.set_range(offset, offset + size - 1);
++ }
++
++ BV& bv_;
++ bit_visitor_callback_type func_;
++};
++
++
++
+ /**
+- @brief bit-vector visitor scanner to traverse each 1 bit using C callback
++ @brief bvector visitor scanner to traverse each 1 bit using C callback
+
+ @param bv - bit vector to scan
+ @param handle_ptr - handle to private memory used by callback
+@@ -267,33 +358,101 @@
+ bit_visitor_callback_type callback_ptr)
+ {
+ typedef typename BV::size_type size_type;
+- // private adaptor for C-style callbacks
+- struct callback_adaptor
++ bm::bit_vitor_callback_adaptor<bit_visitor_callback_type, size_type>
++ func(handle_ptr, callback_ptr);
++ bm::for_each_bit(bv, func);
++}
++
++/**
++ @brief bvector visitor scanner to traverse each bits in range (C callback)
++
++ @param bv - bit vector to scan
++ @param left - from [left..right]
++ @param right - to [left..right]
++ @param handle_ptr - handle to private memory used by callback
++ @param callback_ptr - callback function
++
++ \ingroup setalgo
++
++ @sa bit_visitor_callback_type for_each_bit
++*/
++template<class BV>
++void visit_each_bit_range(const BV& bv,
++ typename BV::size_type left,
++ typename BV::size_type right,
++ void* handle_ptr,
++ bit_visitor_callback_type callback_ptr)
++{
++ typedef typename BV::size_type size_type;
++ bm::bit_vitor_callback_adaptor<bit_visitor_callback_type, size_type>
++ func(handle_ptr, callback_ptr);
++ bm::for_each_bit_range(bv, left, right, func);
++}
++
++/**
++ @brief Algorithm to identify bit-vector ranges (splits) for the rank
++
++ Rank range split algorithm walks the bit-vector to create list of
++ non-overlapping ranges [s1..e1],[s2..e2]...[sN...eN] with requested
++ (rank) number of 1 bits. All ranges should be the same popcount weight,
++ except the last one, which may have less.
++ Scan is progressing from left to right so result ranges will be
++ naturally sorted.
++
++ @param bv - bit vector to perform the range split scan
++ @param rank - requested number of bits in each range
++ if 0 it will create single range [first..last]
++ to cover the whole bv
++ @param target_v - [out] STL(or STL-like) vector of pairs to keep pairs results
++
++ \ingroup setalgo
++ */
++template<typename BV, typename PairVect>
++void rank_range_split(const BV& bv,
++ typename BV::size_type rank,
++ PairVect& target_v)
++{
++ target_v.resize(0);
++ typename BV::size_type first, last, pos;
++ bool found = bv.find_range(first, last);
++ if (!found) // empty bit-vector
++ return;
++
++ if (!rank) // if rank is not defined, include the whole vector [first..last]
+ {
+- callback_adaptor(void* h, bit_visitor_callback_type cb_func)
+- : handle_(h), func_(cb_func)
+- {}
+-
+- void add_bits(size_type offset, const unsigned char* bits, unsigned size)
++ typename PairVect::value_type pv;
++ pv.first = first; pv.second = last;
++ target_v.push_back(pv);
++ return;
++ }
++
++ while (1)
++ {
++ typename PairVect::value_type pv;
++ found = bv.find_rank(rank, first, pos);
++ if (found)
+ {
+- for (unsigned i = 0; i < size; ++i)
+- func_(handle_, offset + bits[i]);
++ pv.first = first; pv.second = pos;
++ target_v.push_back(pv);
++ if (pos >= last)
++ break;
++ first = pos + 1;
++ continue;
+ }
+- void add_range(size_type offset, unsigned size)
++ // insufficient rank (last range)
++ found = bv.any_range(first, last);
++ if (found)
+ {
+- for (unsigned i = 0; i < size; ++i)
+- func_(handle_, offset + i);
++ pv.first = first; pv.second = last;
++ target_v.push_back(pv);
+ }
+-
+- void* handle_;
+- bit_visitor_callback_type func_;
+- };
+-
+- callback_adaptor func(handle_ptr, callback_ptr);
+- bm::for_each_bit(bv, func);
++ break;
++ } // while
++
+ }
+
+
++
+ /**
+ Algorithms for rank compression of bit-vector
+
+@@ -562,6 +721,7 @@
+
+
+
++
+ } // bm
+
+ #include "bmundef.h"
+Index: c++/include/util/bitset/bmrs.h
+===================================================================
+--- a/c++/include/util/bitset/bmrs.h (revision 90103)
++++ b/c++/include/util/bitset/bmrs.h (revision 90104)
+@@ -59,7 +59,7 @@
+ rs_index(const rs_index& rsi);
+
+ /// init arrays to zeros
+- void init() BMNOEXEPT;
++ void init() BMNOEXCEPT;
+
+ /// copy rs index
+ void copy_from(const rs_index& rsi);
+@@ -165,7 +165,7 @@
+
+
+ template<typename BVAlloc>
+-void rs_index<BVAlloc>::init() BMNOEXEPT
++void rs_index<BVAlloc>::init() BMNOEXCEPT
+ {
+ sblock_count_.resize(0);
+ sblock_row_idx_.resize(0);
+Index: c++/include/util/bitset/bmconst.h
+===================================================================
+--- a/c++/include/util/bitset/bmconst.h (revision 90103)
++++ b/c++/include/util/bitset/bmconst.h (revision 90104)
+@@ -96,14 +96,14 @@
+ const unsigned set_array_mask = 0xFFu;
+
+ const unsigned set_total_blocks32 = (bm::set_array_size32 * bm::set_array_size32);
++const unsigned set_sub_total_bits = bm::set_sub_array_size * bm::gap_max_bits;
+
+ #ifdef BM64ADDR
+ const unsigned set_total_blocks48 = bm::id_max48 / bm::gap_max_bits;
+ const unsigned long long id_max = bm::id_max48;
+-const unsigned long long set_array_size48 = 1 + (bm::id_max48 / (bm::set_sub_array_size * bm::gap_max_bits));
++const unsigned long long set_array_size48 = 1 + (bm::id_max48 / set_sub_total_bits);
+ const unsigned set_top_array_size = bm::set_array_size48;
+ const id64_t set_total_blocks = id64_t(bm::set_top_array_size) * set_sub_array_size;
+-//bm::id_max / (bm::gap_max_bits * bm::set_sub_array_size);
+ #else
+ const unsigned id_max = bm::id_max32;
+ const unsigned set_top_array_size = bm::set_array_size32;
+@@ -228,8 +228,8 @@
+ };
+
+ template<bool T> const char _copyright<T>::_p[] =
+- "BitMagic C++ Library. v.6.0.0 (c) 2002-2020 Anatoliy Kuznetsov.";
+-template<bool T> const unsigned _copyright<T>::_v[3] = {6, 0, 0};
++ "BitMagic C++ Library. v.6.4.0 (c) 2002-2020 Anatoliy Kuznetsov.";
++template<bool T> const unsigned _copyright<T>::_v[3] = {6, 4, 0};
+
+
+
+Index: c++/include/util/bitset/bmsse_util.h
+===================================================================
+--- a/c++/include/util/bitset/bmsse_util.h (revision 90103)
++++ b/c++/include/util/bitset/bmsse_util.h (revision 90104)
+@@ -823,9 +823,6 @@
+ inline
+ void sse2_invert_block(__m128i* dst)
+ {
+- //__m128i mZ = _mm_setzero_si128();
+- //__m128i maskF = _mm_cmpeq_epi8(mZ, mZ); // 0xFF..
+-
+ __m128i maskF = _mm_set1_epi32(~0u);
+ __m128i* BMRESTRICT dst_end =
+ (__m128i*)((bm::word_t*)(dst) + bm::set_block_size);
+Index: c++/include/util/bitset/bmsparsevec_util.h
+===================================================================
+--- a/c++/include/util/bitset/bmsparsevec_util.h (revision 90103)
++++ b/c++/include/util/bitset/bmsparsevec_util.h (revision 90104)
+@@ -70,7 +70,7 @@
+ /*!
+ \brief Move content from the argument address resolver
+ */
+- void move_from(bvps_addr_resolver& addr_res) BMNOEXEPT;
++ void move_from(bvps_addr_resolver& addr_res) BMNOEXCEPT;
+
+ /*!
+ \brief Resolve id to integer id (address)
+@@ -82,7 +82,7 @@
+
+ \return true if id is known and resolved successfully
+ */
+- bool resolve(size_type id_from, size_type* id_to) const;
++ bool resolve(size_type id_from, size_type* id_to) const BMNOEXCEPT;
+
+ /*!
+ \brief Resolve id to integer id (address) without sync check
+@@ -94,7 +94,7 @@
+
+ \return true if id is known and resolved successfully
+ */
+- bool get(size_type id_from, size_type* id_to) const;
++ bool get(size_type id_from, size_type* id_to) const BMNOEXCEPT;
+
+ /*!
+ \brief Set id (bit) to address resolver
+@@ -146,7 +146,7 @@
+ /*!
+ \brief equality comparison
+ */
+- bool equal(const bvps_addr_resolver& addr_res) const;
++ bool equal(const bvps_addr_resolver& addr_res) const BMNOEXCEPT;
+
+ protected:
+ void construct_rs_index();
+@@ -437,7 +437,7 @@
+
+
+ template<class BV>
+-void bvps_addr_resolver<BV>::move_from(bvps_addr_resolver& addr_res) BMNOEXEPT
++void bvps_addr_resolver<BV>::move_from(bvps_addr_resolver& addr_res) BMNOEXCEPT
+ {
+ if (this != &addr_res)
+ {
+@@ -459,7 +459,8 @@
+ //---------------------------------------------------------------------
+
+ template<class BV>
+-bool bvps_addr_resolver<BV>::resolve(size_type id_from, size_type* id_to) const
++bool bvps_addr_resolver<BV>::resolve(size_type id_from,
++ size_type* id_to) const BMNOEXCEPT
+ {
+ BM_ASSERT(id_to);
+ if (in_sync_)
+@@ -484,7 +485,8 @@
+ //---------------------------------------------------------------------
+
+ template<class BV>
+-bool bvps_addr_resolver<BV>::get(size_type id_from, size_type* id_to) const
++bool bvps_addr_resolver<BV>::get(size_type id_from,
++ size_type* id_to) const BMNOEXCEPT
+ {
+ BM_ASSERT(id_to);
+ BM_ASSERT(in_sync_);
+@@ -529,10 +531,10 @@
+ //---------------------------------------------------------------------
+
+ template<class BV>
+-bool bvps_addr_resolver<BV>::equal(const bvps_addr_resolver& addr_res) const
++bool bvps_addr_resolver<BV>::equal(
++ const bvps_addr_resolver& addr_res) const BMNOEXCEPT
+ {
+- int cmp = addr_bv_.compare(addr_res.addr_bv_);
+- return (cmp == 0);
++ return addr_bv_.equal(addr_res.addr_bv_);
+ }
+
+ //---------------------------------------------------------------------
+Index: c++/include/util/bitset/bmalloc.h
+===================================================================
+--- a/c++/include/util/bitset/bmalloc.h (revision 90103)
++++ b/c++/include/util/bitset/bmalloc.h (revision 90104)
+@@ -73,13 +73,10 @@
+ ptr = (bm::word_t*) ::_mm_malloc(n * sizeof(bm::word_t), BM_ALLOC_ALIGN);
+ #endif
+ #else
+- ptr = (bm::word_t*) ::malloc(n * sizeof(bm::word_t));
++ ptr = (bm::word_t*) ::malloc(n * sizeof(bm::word_t));
+ #endif
+-
+ if (!ptr)
+- {
+ throw std::bad_alloc();
+- }
+ return ptr;
+ }
+
+@@ -87,7 +84,7 @@
+ The member function frees storage for an array of n bm::word_t
+ elements, by calling free.
+ */
+- static void deallocate(bm::word_t* p, size_t)
++ static void deallocate(bm::word_t* p, size_t) BMNOEXCEPT
+ {
+ #ifdef BM_ALLOC_ALIGN
+ # ifdef _MSC_VER
+@@ -120,9 +117,7 @@
+ {
+ void* ptr = ::malloc(n * sizeof(void*));
+ if (!ptr)
+- {
+ throw std::bad_alloc();
+- }
+ return ptr;
+ }
+
+@@ -130,7 +125,7 @@
+ The member function frees storage for an array of n bm::word_t
+ elements, by calling free.
+ */
+- static void deallocate(void* p, size_t)
++ static void deallocate(void* p, size_t) BMNOEXCEPT
+ {
+ ::free(p);
+ }
+@@ -147,7 +142,7 @@
+ n_pool_max_size = BM_DEFAULT_POOL_SIZE
+ };
+
+- pointer_pool_array() : size_(0)
++ pointer_pool_array() : pool_ptr_(0), size_(0)
+ {
+ allocate_pool(n_pool_max_size);
+ }
+@@ -164,7 +159,7 @@
+ /// Push pointer to the pool (if it is not full)
+ ///
+ /// @return 0 if pointer is not accepted (pool is full)
+- unsigned push(void* ptr)
++ unsigned push(void* ptr) BMNOEXCEPT
+ {
+ if (size_ == n_pool_max_size - 1)
+ return 0;
+@@ -174,9 +169,9 @@
+
+ /// Get a pointer if there are any vacant
+ ///
+- void* pop()
++ void* pop() BMNOEXCEPT
+ {
+- if (size_ == 0)
++ if (!size_)
+ return 0;
+ return pool_ptr_[--size_];
+ }
+@@ -183,12 +178,13 @@
+ private:
+ void allocate_pool(size_t pool_size)
+ {
++ BM_ASSERT(!pool_ptr_);
+ pool_ptr_ = (void**)::malloc(sizeof(void*) * pool_size);
+ if (!pool_ptr_)
+ throw std::bad_alloc();
+ }
+
+- void free_pool()
++ void free_pool() BMNOEXCEPT
+ {
+ ::free(pool_ptr_);
+ }
+@@ -218,21 +214,19 @@
+ bm::word_t* alloc_bit_block()
+ {
+ bm::word_t* ptr = (bm::word_t*)block_pool_.pop();
+- if (ptr == 0)
++ if (!ptr)
+ ptr = block_alloc_.allocate(bm::set_block_size, 0);
+ return ptr;
+ }
+
+- void free_bit_block(bm::word_t* block)
++ void free_bit_block(bm::word_t* block) BMNOEXCEPT
+ {
+ BM_ASSERT(IS_VALID_ADDR(block));
+ if (!block_pool_.push(block))
+- {
+ block_alloc_.deallocate(block, bm::set_block_size);
+- }
+ }
+
+- void free_pools()
++ void free_pools() BMNOEXCEPT
+ {
+ bm::word_t* block;
+ do
+@@ -267,19 +261,19 @@
+
+ public:
+
+- mem_alloc(const BA& block_alloc = BA(), const PA& ptr_alloc = PA())
++ mem_alloc(const BA& block_alloc = BA(), const PA& ptr_alloc = PA()) BMNOEXCEPT
+ : block_alloc_(block_alloc),
+ ptr_alloc_(ptr_alloc),
+ alloc_pool_p_(0)
+ {}
+
+- mem_alloc(const mem_alloc& ma)
++ mem_alloc(const mem_alloc& ma) BMNOEXCEPT
+ : block_alloc_(ma.block_alloc_),
+ ptr_alloc_(ma.ptr_alloc_),
+ alloc_pool_p_(0) // do not inherit pool (has to be explicitly defined)
+ {}
+
+- mem_alloc& operator=(const mem_alloc& ma)
++ mem_alloc& operator=(const mem_alloc& ma) BMNOEXCEPT
+ {
+ block_alloc_ = ma.block_alloc_;
+ ptr_alloc_ = ma.ptr_alloc_;
+@@ -289,7 +283,7 @@
+
+ /*! @brief Returns copy of the block allocator object
+ */
+- block_allocator_type get_block_allocator() const
++ block_allocator_type get_block_allocator() const BMNOEXCEPT
+ {
+ return BA(block_alloc_);
+ }
+@@ -296,19 +290,19 @@
+
+ /*! @brief Returns copy of the ptr allocator object
+ */
+- ptr_allocator_type get_ptr_allocator() const
++ ptr_allocator_type get_ptr_allocator() const BMNOEXCEPT
+ {
+ return PA(block_alloc_);
+ }
+
+ /*! @brief set pointer to external pool */
+- void set_pool(allocator_pool_type* pool)
++ void set_pool(allocator_pool_type* pool) BMNOEXCEPT
+ {
+ alloc_pool_p_ = pool;
+ }
+
+ /*! @brief get pointer to allocation pool (if set) */
+- allocator_pool_type* get_pool()
++ allocator_pool_type* get_pool() BMNOEXCEPT
+ {
+ return alloc_pool_p_;
+ }
+@@ -328,7 +322,7 @@
+
+ /*! @brief Frees bit block allocated by alloc_bit_block.
+ */
+- void free_bit_block(bm::word_t* block, unsigned alloc_factor = 1)
++ void free_bit_block(bm::word_t* block, unsigned alloc_factor = 1) BMNOEXCEPT
+ {
+ BM_ASSERT(IS_VALID_ADDR(block));
+ if (alloc_pool_p_ && alloc_factor == 1)
+@@ -377,7 +371,7 @@
+
+ /*! @brief Frees block of pointers.
+ */
+- void free_ptr(void* p, size_t size)
++ void free_ptr(void* p, size_t size) BMNOEXCEPT
+ {
+ if (p)
+ ptr_alloc_.deallocate(p, size);
+@@ -427,7 +421,7 @@
+ ///
+ /// @internal
+ inline
+-void aligned_free(void* ptr)
++void aligned_free(void* ptr) BMNOEXCEPT
+ {
+ if (!ptr)
+ return;
+Index: c++/include/util/bitset/bmsparsevec_serial.h
+===================================================================
+--- a/c++/include/util/bitset/bmsparsevec_serial.h (revision 90103)
++++ b/c++/include/util/bitset/bmsparsevec_serial.h (revision 90104)
+@@ -927,7 +927,8 @@
+
+ BM_ASSERT(h1 == 'B' && (h2 == 'M' || h2 == 'C'));
+
+- if (h1 != 'B' && (h2 != 'M' || h2 != 'C')) // no magic header?
++ bool sig2_ok = (h2 == 'M' || h2 == 'C');
++ if (h1 != 'B' || !sig2_ok) //&& (h2 != 'M' || h2 != 'C')) // no magic header?
+ raise_invalid_header();
+
+ unsigned char bv_bo = dec.get_8(); (void) bv_bo;
+Index: c++/include/util/bitset/bmfunc.h
+===================================================================
+--- a/c++/include/util/bitset/bmfunc.h (revision 90103)
++++ b/c++/include/util/bitset/bmfunc.h (revision 90104)
+@@ -39,12 +39,12 @@
+ inline
+ bm::id_t bit_block_calc_count_range(const bm::word_t* block,
+ bm::word_t left,
+- bm::word_t right);
++ bm::word_t right) BMNOEXCEPT;
+
+ inline
+ bm::id_t bit_block_any_range(const bm::word_t* block,
+ bm::word_t left,
+- bm::word_t right);
++ bm::word_t right) BMNOEXCEPT;
+
+ /*!
+ @brief Structure with statistical information about memory
+@@ -64,7 +64,7 @@
+ unsigned long long gaps_by_level[bm::gap_levels]; ///< number of GAP blocks at each level
+
+ /// cound bit block
+- void add_bit_block()
++ void add_bit_block() BMNOEXCEPT
+ {
+ ++bit_blocks;
+ size_t mem_used = sizeof(bm::word_t) * bm::set_block_size;
+@@ -73,7 +73,7 @@
+ }
+
+ /// count gap block
+- void add_gap_block(unsigned capacity, unsigned length)
++ void add_gap_block(unsigned capacity, unsigned length) BMNOEXCEPT
+ {
+ ++gap_blocks;
+ size_t mem_used = (capacity * sizeof(gap_word_t));
+@@ -93,7 +93,7 @@
+ }
+
+ /// Reset statisctics
+- void reset()
++ void reset() BMNOEXCEPT
+ {
+ bit_blocks = gap_blocks = ptr_sub_blocks = bv_count = 0;
+ max_serialize_mem = memory_used = gap_cap_overhead = 0;
+@@ -102,10 +102,11 @@
+ }
+
+ /// Sum data from another sttructure
+- void add(const bv_statistics& st)
++ void add(const bv_statistics& st) BMNOEXCEPT
+ {
+ bit_blocks += st.bit_blocks;
+ gap_blocks += st.gap_blocks;
++ ptr_sub_blocks += st.ptr_sub_blocks;
+ bv_count += st.bv_count;
+ max_serialize_mem += st.max_serialize_mem + 8;
+ memory_used += st.memory_used;
+@@ -121,6 +122,8 @@
+ {
+ First first;
+ Second second;
++
++ pair(First f, Second s) : first(f), second(s) {}
+ };
+
+ /**
+@@ -141,7 +144,8 @@
+ \internal
+ */
+ template<typename BI_TYPE>
+-void get_block_coord(BI_TYPE nb, unsigned& i, unsigned& j)
++BMFORCEINLINE
++void get_block_coord(BI_TYPE nb, unsigned& i, unsigned& j) BMNOEXCEPT
+ {
+ i = unsigned(nb >> bm::set_array_shift); // top block address
+ j = unsigned(nb & bm::set_array_mask); // address in sub-block
+@@ -148,18 +152,28 @@
+ }
+
+ /**
+- \brief ad-hoc conditional expressions
++ Compute bit address of the first bit in a superblock
+ \internal
+ */
+-template <bool b> struct conditional
++template<typename RTYPE>
++BMFORCEINLINE RTYPE get_super_block_start(unsigned i) BMNOEXCEPT
+ {
+- static bool test() { return true; }
+-};
+-template <> struct conditional<false>
++ return RTYPE(i) * bm::set_sub_total_bits;
++}
++
++/**
++ Compute bit address of the first bit in a block
++ \internal
++*/
++template<typename RTYPE>
++BMFORCEINLINE RTYPE get_block_start(unsigned i, unsigned j) BMNOEXCEPT
+ {
+- static bool test() { return false; }
+-};
++ RTYPE idx = bm::get_super_block_start<RTYPE>(i);
++ idx += (j) * bm::gap_max_bits;
++ return idx;
++}
+
++
+ /*!
+ @defgroup gapfunc GAP functions
+ GAP functions implement different opereations on GAP compressed blocks (internals)
+@@ -177,42 +191,12 @@
+ */
+
+
+-
+-
+ /*!
+- Returns BSR value
+- @ingroup bitfunc
+-*/
+-template <class T>
+-unsigned bit_scan_reverse(T value)
+-{
+- BM_ASSERT(value);
+-
+- if (bm::conditional<sizeof(T)==8>::test())
+- {
+- #if defined(BM_USE_GCC_BUILD)
+- return (unsigned) (63 - __builtin_clzll(value));
+- #else
+- bm::id64_t v8 = value;
+- v8 >>= 32;
+- unsigned v = (unsigned)v8;
+- if (v)
+- {
+- v = bm::bit_scan_reverse32(v);
+- return v + 32;
+- }
+- #endif
+- }
+- return bit_scan_reverse32((unsigned)value);
+-}
+-
+-
+-/*!
+ Returns bit count
+ @ingroup bitfunc
+ */
+ BMFORCEINLINE
+-bm::id_t word_bitcount(bm::id_t w)
++bm::id_t word_bitcount(bm::id_t w) BMNOEXCEPT
+ {
+ #if defined(BMSSE42OPT) || defined(BMAVX2OPT)
+ return bm::id_t(_mm_popcnt_u32(w));
+@@ -230,7 +214,7 @@
+ }
+
+ inline
+-int parallel_popcnt_32(unsigned int n)
++int parallel_popcnt_32(unsigned int n) BMNOEXCEPT
+ {
+ unsigned int tmp;
+
+@@ -245,7 +229,7 @@
+ @ingroup bitfunc
+ */
+ BMFORCEINLINE
+-unsigned word_bitcount64(bm::id64_t x)
++unsigned word_bitcount64(bm::id64_t x) BMNOEXCEPT
+ {
+ #if defined(BMSSE42OPT) || defined(BMAVX2OPT)
+ #if defined(BM64_SSE4) || defined(BM64_AVX2) || defined(BM64_AVX512)
+@@ -270,7 +254,7 @@
+
+ inline
+ unsigned bitcount64_4way(bm::id64_t x, bm::id64_t y,
+- bm::id64_t u, bm::id64_t v)
++ bm::id64_t u, bm::id64_t v) BMNOEXCEPT
+ {
+ const bm::id64_t m1 = 0x5555555555555555U;
+ const bm::id64_t m2 = 0x3333333333333333U;
+@@ -400,7 +384,8 @@
+ /*! @brief Adaptor to copy 1 bits to array
+ @internal
+ */
+-template<typename B> class copy_to_array_functor
++template<typename B>
++class copy_to_array_functor
+ {
+ public:
+ copy_to_array_functor(B* bits): bp_(bits)
+@@ -408,10 +393,10 @@
+
+ B* ptr() { return bp_; }
+
+- void operator()(unsigned bit_idx) { *bp_++ = (B)bit_idx; }
++ void operator()(unsigned bit_idx) BMNOEXCEPT { *bp_++ = (B)bit_idx; }
+
+ void operator()(unsigned bit_idx0,
+- unsigned bit_idx1)
++ unsigned bit_idx1) BMNOEXCEPT
+ {
+ bp_[0] = (B)bit_idx0; bp_[1] = (B)bit_idx1;
+ bp_+=2;
+@@ -419,7 +404,7 @@
+
+ void operator()(unsigned bit_idx0,
+ unsigned bit_idx1,
+- unsigned bit_idx2)
++ unsigned bit_idx2) BMNOEXCEPT
+ {
+ bp_[0] = (B)bit_idx0; bp_[1] = (B)bit_idx1; bp_[2] = (B)bit_idx2;
+ bp_+=3;
+@@ -428,7 +413,7 @@
+ void operator()(unsigned bit_idx0,
+ unsigned bit_idx1,
+ unsigned bit_idx2,
+- unsigned bit_idx3)
++ unsigned bit_idx3) BMNOEXCEPT
+ {
+ bp_[0] = (B)bit_idx0; bp_[1] = (B)bit_idx1;
+ bp_[2] = (B)bit_idx2; bp_[3] = (B)bit_idx3;
+@@ -451,7 +436,8 @@
+
+ @ingroup bitfunc
+ */
+-template<typename T,typename B> unsigned bit_list(T w, B* bits)
++template<typename T,typename B>
++unsigned bit_list(T w, B* bits) BMNOEXCEPT
+ {
+ copy_to_array_functor<B> func(bits);
+ bit_for_each(w, func);
+@@ -468,7 +454,8 @@
+
+ @ingroup bitfunc
+ */
+-template<typename T,typename B> unsigned bit_list_4(T w, B* bits)
++template<typename T,typename B>
++unsigned bit_list_4(T w, B* bits) BMNOEXCEPT
+ {
+ copy_to_array_functor<B> func(bits);
+ bit_for_each_4(w, func);
+@@ -486,7 +473,8 @@
+ @internal
+ */
+ template<typename B>
+-unsigned short bitscan_popcnt(bm::id_t w, B* bits, unsigned short offs)
++unsigned short
++bitscan_popcnt(bm::id_t w, B* bits, unsigned short offs) BMNOEXCEPT
+ {
+ unsigned pos = 0;
+ while (w)
+@@ -508,7 +496,7 @@
+ @internal
+ */
+ template<typename B>
+-unsigned short bitscan_popcnt(bm::id_t w, B* bits)
++unsigned short bitscan_popcnt(bm::id_t w, B* bits) BMNOEXCEPT
+ {
+ unsigned pos = 0;
+ while (w)
+@@ -529,29 +517,48 @@
+ @ingroup bitfunc
+ */
+ template<typename B>
+-unsigned short bitscan_popcnt64(bm::id64_t w, B* bits)
++unsigned short bitscan_popcnt64(bm::id64_t w, B* bits) BMNOEXCEPT
+ {
+ unsigned short pos = 0;
+ while (w)
+ {
+- bm::id64_t t = w & -w;
++ bm::id64_t t = bmi_blsi_u64(w); // w & -w;
+ bits[pos++] = (B) bm::word_bitcount64(t - 1);
+- w &= w - 1;
++ w = bmi_bslr_u64(w); // w &= w - 1;
+ }
+ return pos;
+ }
+
++/*!
++ \brief Unpacks 64-bit word into list of ON bit indexes using popcnt method
++ \param w - value
++ \param bits - pointer on the result array
++ \param offs - value to add to bit position (programmed shift)
++ \return number of bits in the list
++ @ingroup bitfunc
++*/
++template<typename B>
++unsigned short
++bitscan_popcnt64(bm::id64_t w, B* bits, unsigned short offs) BMNOEXCEPT
++{
++ unsigned short pos = 0;
++ while (w)
++ {
++ bm::id64_t t = bmi_blsi_u64(w); // w & -w;
++ bits[pos++] = B(bm::word_bitcount64(t - 1) + offs);
++ w = bmi_bslr_u64(w); // w &= w - 1;
++ }
++ return pos;
++}
++
++
+ template<typename V, typename B>
+-unsigned short bitscan(V w, B* bits)
++unsigned short bitscan(V w, B* bits) BMNOEXCEPT
+ {
+ if (bm::conditional<sizeof(V) == 8>::test())
+- {
+ return bm::bitscan_popcnt64(w, bits);
+- }
+ else
+- {
+ return bm::bitscan_popcnt((bm::word_t)w, bits);
+- }
+ }
+
+ // --------------------------------------------------------------
+@@ -566,7 +573,7 @@
+ \return selected value (inxed of bit set)
+ */
+ inline
+-unsigned word_select64_linear(bm::id64_t w, unsigned rank)
++unsigned word_select64_linear(bm::id64_t w, unsigned rank) BMNOEXCEPT
+ {
+ BM_ASSERT(w);
+ BM_ASSERT(rank);
+@@ -589,7 +596,7 @@
+ \return selected value (inxed of bit set)
+ */
+ inline
+-unsigned word_select64_bitscan(bm::id64_t w, unsigned rank)
++unsigned word_select64_bitscan(bm::id64_t w, unsigned rank) BMNOEXCEPT
+ {
+ BM_ASSERT(w);
+ BM_ASSERT(rank);
+@@ -616,7 +623,7 @@
+ \return selected value (inxed of bit set)
+ */
+ inline
+-unsigned word_select64(bm::id64_t w, unsigned rank)
++unsigned word_select64(bm::id64_t w, unsigned rank) BMNOEXCEPT
+ {
+ #if defined(BMI2_SELECT64)
+ return BMI2_SELECT64(w, rank);
+@@ -642,7 +649,7 @@
+ @internal
+ */
+ BMFORCEINLINE
+-bm::id64_t widx_to_digest_mask(unsigned w_idx)
++bm::id64_t widx_to_digest_mask(unsigned w_idx) BMNOEXCEPT
+ {
+ bm::id64_t mask(1ull);
+ return mask << (w_idx / bm::set_block_digest_wave_size);
+@@ -657,7 +664,7 @@
+ @internal
+ */
+ BMFORCEINLINE
+-bm::id64_t digest_mask(unsigned from, unsigned to)
++bm::id64_t digest_mask(unsigned from, unsigned to) BMNOEXCEPT
+ {
+ BM_ASSERT(from <= to);
+
+@@ -680,7 +687,8 @@
+ @internal
+ */
+ inline
+-bool check_zero_digest(bm::id64_t digest, unsigned bitpos_from, unsigned bitpos_to)
++bool check_zero_digest(bm::id64_t digest,
++ unsigned bitpos_from, unsigned bitpos_to) BMNOEXCEPT
+ {
+ bm::id64_t mask = bm::digest_mask(bitpos_from, bitpos_to);
+ return !(digest & mask);
+@@ -695,7 +703,7 @@
+ @internal
+ */
+ inline
+-void block_init_digest0(bm::word_t* const block, bm::id64_t digest)
++void block_init_digest0(bm::word_t* const block, bm::id64_t digest) BMNOEXCEPT
+ {
+ unsigned off;
+ for (unsigned i = 0; i < 64; ++i)
+@@ -725,7 +733,7 @@
+ @internal
+ */
+ inline
+-bm::id64_t calc_block_digest0(const bm::word_t* const block)
++bm::id64_t calc_block_digest0(const bm::word_t* const block) BMNOEXCEPT
+ {
+ bm::id64_t digest0 = 0;
+ unsigned off;
+@@ -766,7 +774,8 @@
+ @internal
+ */
+ inline
+-bm::id64_t update_block_digest0(const bm::word_t* const block, bm::id64_t digest)
++bm::id64_t
++update_block_digest0(const bm::word_t* const block, bm::id64_t digest) BMNOEXCEPT
+ {
+ const bm::id64_t mask(1ull);
+ bm::id64_t d = digest;
+@@ -807,7 +816,7 @@
+
+ /// Returns true if set operation is constant (bitcount)
+ inline
+-bool is_const_set_operation(set_operation op)
++bool is_const_set_operation(set_operation op) BMNOEXCEPT
+ {
+ return (int(op) >= int(set_COUNT));
+ }
+@@ -816,7 +825,7 @@
+ Convert set operation to operation
+ */
+ inline
+-bm::operation setop2op(bm::set_operation op)
++bm::operation setop2op(bm::set_operation op) BMNOEXCEPT
+ {
+ BM_ASSERT(op == set_AND ||
+ op == set_OR ||
+@@ -863,7 +872,7 @@
+ // version with minimal branching, super-scalar friendly
+ //
+ inline
+- static bm::id64_t block_type(const bm::word_t* bp)
++ static bm::id64_t block_type(const bm::word_t* bp) BMNOEXCEPT
+ {
+ bm::id64_t type;
+ if (bm::conditional<sizeof(void*) == 8>::test())
+@@ -884,11 +893,11 @@
+ }
+
+ BMFORCEINLINE
+- static bool is_full_block(const bm::word_t* bp)
++ static bool is_full_block(const bm::word_t* bp) BMNOEXCEPT
+ { return (bp == _block._p || bp == _block._p_fullp); }
+
+ BMFORCEINLINE
+- static bool is_valid_block_addr(const bm::word_t* bp)
++ static bool is_valid_block_addr(const bm::word_t* bp) BMNOEXCEPT
+ { return (bp && !(bp == _block._p || bp == _block._p_fullp)); }
+
+ static all_set_block _block;
+@@ -899,7 +908,7 @@
+
+ /// XOR swap two scalar variables
+ template<typename W>
+-void xor_swap(W& x, W& y)
++void xor_swap(W& x, W& y) BMNOEXCEPT
+ {
+ BM_ASSERT(&x != &y);
+ x ^= y;
+@@ -913,7 +922,7 @@
+ @internal
+ */
+ template<typename N>
+-bool find_not_null_ptr(bm::word_t*** arr, N start, N size, N* pos)
++bool find_not_null_ptr(bm::word_t*** arr, N start, N size, N* pos) BMNOEXCEPT
+ {
+ BM_ASSERT(pos);
+ // BM_ASSERT(start < size);
+@@ -1035,7 +1044,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-bool bit_is_all_zero(const bm::word_t* BMRESTRICT start)
++bool bit_is_all_zero(const bm::word_t* BMRESTRICT start) BMNOEXCEPT
+ {
+ #if defined(VECT_IS_ZERO_BLOCK)
+ return VECT_IS_ZERO_BLOCK(start);
+@@ -1062,7 +1071,7 @@
+ @ingroup gapfunc
+ */
+ BMFORCEINLINE
+-bool gap_is_all_zero(const bm::gap_word_t* buf)
++bool gap_is_all_zero(const bm::gap_word_t* BMRESTRICT buf) BMNOEXCEPT
+ {
+ // (almost) branchless variant:
+ return (!(*buf & 1u)) & (!(bm::gap_max_bits - 1 - buf[1]));
+@@ -1075,7 +1084,7 @@
+ @ingroup gapfunc
+ */
+ BMFORCEINLINE
+-bool gap_is_all_one(const bm::gap_word_t* buf)
++bool gap_is_all_one(const bm::gap_word_t* BMRESTRICT buf) BMNOEXCEPT
+ {
+ return ((*buf & 1u) && (buf[1] == bm::gap_max_bits - 1));
+ }
+@@ -1088,7 +1097,7 @@
+ @ingroup gapfunc
+ */
+ BMFORCEINLINE
+-bm::gap_word_t gap_length(const bm::gap_word_t* buf)
++bm::gap_word_t gap_length(const bm::gap_word_t* BMRESTRICT buf) BMNOEXCEPT
+ {
+ return (bm::gap_word_t)((*buf >> 3) + 1);
+ }
+@@ -1103,7 +1112,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_capacity(const T* buf, const T* glevel_len)
++unsigned
++gap_capacity(const T* BMRESTRICT buf, const T* BMRESTRICT glevel_len) BMNOEXCEPT
+ {
+ return glevel_len[(*buf >> 1) & 3];
+ }
+@@ -1118,7 +1128,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_limit(const T* buf, const T* glevel_len)
++unsigned
++gap_limit(const T* BMRESTRICT buf, const T* BMRESTRICT glevel_len) BMNOEXCEPT
+ {
+ return glevel_len[(*buf >> 1) & 3]-4;
+ }
+@@ -1132,7 +1143,7 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-T gap_level(const T* buf)
++T gap_level(const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ return T((*buf >> 1) & 3u);
+ }
+@@ -1149,7 +1160,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_find_last(const T* buf, unsigned* last)
++unsigned
++gap_find_last(const T* BMRESTRICT buf, unsigned* BMRESTRICT last) BMNOEXCEPT
+ {
+ BM_ASSERT(last);
+
+@@ -1179,7 +1191,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_find_first(const T* buf, unsigned* first)
++unsigned
++gap_find_first(const T* BMRESTRICT buf, unsigned* BMRESTRICT first) BMNOEXCEPT
+ {
+ BM_ASSERT(first);
+
+@@ -1206,24 +1219,30 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_bfind(const T* buf, unsigned pos, unsigned* is_set)
++unsigned gap_bfind(const T* BMRESTRICT buf,
++ unsigned pos, unsigned* BMRESTRICT is_set) BMNOEXCEPT
+ {
+ BM_ASSERT(pos < bm::gap_max_bits);
+- *is_set = (*buf) & 1;
++ #undef VECT_GAP_BFIND // TODO: VECTOR bfind causes performance degradation
++ #ifdef VECT_GAP_BFIND
++ return VECT_GAP_BFIND(buf, pos, is_set);
++ #else
++ *is_set = (*buf) & 1;
+
+- unsigned start = 1;
+- unsigned end = 1 + ((*buf) >> 3);
++ unsigned start = 1;
++ unsigned end = 1 + ((*buf) >> 3);
+
+- while ( start != end )
+- {
+- unsigned curr = (start + end) >> 1;
+- if ( buf[curr] < pos )
+- start = curr + 1;
+- else
+- end = curr;
+- }
+- *is_set ^= ((start-1) & 1);
+- return start;
++ while ( start != end )
++ {
++ unsigned curr = (start + end) >> 1;
++ if ( buf[curr] < pos )
++ start = curr + 1;
++ else
++ end = curr;
++ }
++ *is_set ^= ((start-1) & 1);
++ return start;
++ #endif
+ }
+
+
+@@ -1234,7 +1253,8 @@
+ \return true if position is in "1" gap
+ @ingroup gapfunc
+ */
+-template<typename T> unsigned gap_test(const T* buf, unsigned pos)
++template<typename T>
++unsigned gap_test(const T* BMRESTRICT buf, unsigned pos) BMNOEXCEPT
+ {
+ BM_ASSERT(pos < bm::gap_max_bits);
+
+@@ -1277,7 +1297,7 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_test_unr(const T* buf, const unsigned pos)
++unsigned gap_test_unr(const T* BMRESTRICT buf, const unsigned pos) BMNOEXCEPT
+ {
+ BM_ASSERT(pos < bm::gap_max_bits);
+
+@@ -1286,84 +1306,10 @@
+ return (*buf) & 1;
+ }
+ #if defined(BMSSE2OPT)
+- unsigned start = 1;
+- unsigned end = 1 + ((*buf) >> 3);
+- unsigned dsize = end - start;
+-
+- if (dsize < 17)
+- {
+- start = bm::sse2_gap_find(buf + 1, (bm::gap_word_t)pos, dsize);
+- unsigned res = ((*buf) & 1) ^ ((start) & 1);
+- BM_ASSERT(buf[start + 1] >= pos);
+- BM_ASSERT(buf[start] < pos || (start == 0));
+- BM_ASSERT(res == bm::gap_test(buf, pos));
+- return res;
+- }
+- unsigned arr_end = end;
+- while (start != end)
+- {
+- unsigned curr = (start + end) >> 1;
+- if (buf[curr] < pos)
+- start = curr + 1;
+- else
+- end = curr;
+-
+- unsigned size = end - start;
+- if (size < 16)
+- {
+- size += (end != arr_end);
+- unsigned idx = bm::sse2_gap_find(buf + start, (bm::gap_word_t)pos, size);
+- start += idx;
+-
+- BM_ASSERT(buf[start] >= pos);
+- BM_ASSERT(buf[start - 1] < pos || (start == 1));
+- break;
+- }
+- }
+-
+- unsigned res = ((*buf) & 1) ^ ((--start) & 1);
+-
++ unsigned res = bm::sse2_gap_test(buf, pos);
+ BM_ASSERT(res == bm::gap_test(buf, pos));
+- return res;
+-//#endif
+ #elif defined(BMSSE42OPT)
+- unsigned start = 1;
+- unsigned end = 1 + ((*buf) >> 3);
+- unsigned dsize = end - start;
+-
+- if (dsize < 17)
+- {
+- start = bm::sse4_gap_find(buf+1, (bm::gap_word_t)pos, dsize);
+- unsigned res = ((*buf) & 1) ^ ((start) & 1);
+- BM_ASSERT(buf[start+1] >= pos);
+- BM_ASSERT(buf[start] < pos || (start==0));
+- BM_ASSERT(res == bm::gap_test(buf, pos));
+- return res;
+- }
+- unsigned arr_end = end;
+- while (start != end)
+- {
+- unsigned curr = (start + end) >> 1;
+- if (buf[curr] < pos)
+- start = curr + 1;
+- else
+- end = curr;
+-
+- unsigned size = end - start;
+- if (size < 16)
+- {
+- size += (end != arr_end);
+- unsigned idx = bm::sse4_gap_find(buf + start, (bm::gap_word_t)pos, size);
+- start += idx;
+-
+- BM_ASSERT(buf[start] >= pos);
+- BM_ASSERT(buf[start - 1] < pos || (start == 1));
+- break;
+- }
+- }
+-
+- unsigned res = ((*buf) & 1) ^ ((--start) & 1);
+-
++ unsigned res = bm::sse42_gap_test(buf, pos);
+ BM_ASSERT(res == bm::gap_test(buf, pos));
+ #elif defined(BMAVX2OPT)
+ unsigned res = bm::avx2_gap_test(buf, pos);
+@@ -1378,21 +1324,22 @@
+ \internal
+ */
+ template<typename T, typename N, typename F>
+-void for_each_nzblock_range(T*** root, N top_size, N nb_from, N nb_to, F& f)
++void for_each_nzblock_range(T*** root,
++ N top_size, N nb_from, N nb_to, F& f) BMNOEXCEPT
+ {
+ BM_ASSERT(top_size);
+ if (nb_from > nb_to)
+ return;
+- unsigned i_from = nb_from >> bm::set_array_shift;
+- unsigned j_from = nb_from & bm::set_array_mask;
+- unsigned i_to = nb_to >> bm::set_array_shift;
+- unsigned j_to = nb_to & bm::set_array_mask;
++ unsigned i_from = unsigned(nb_from >> bm::set_array_shift);
++ unsigned j_from = unsigned(nb_from & bm::set_array_mask);
++ unsigned i_to = unsigned(nb_to >> bm::set_array_shift);
++ unsigned j_to = unsigned(nb_to & bm::set_array_mask);
+
+ if (i_from >= top_size)
+ return;
+ if (i_to >= top_size)
+ {
+- i_to = top_size-1;
++ i_to = unsigned(top_size-1);
+ j_to = bm::set_sub_array_size-1;
+ }
+
+@@ -1400,16 +1347,12 @@
+ {
+ T** blk_blk = root[i];
+ if (!blk_blk)
+- {
+ continue;
+- }
+ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
+ {
+ unsigned j = (i == i_from) ? j_from : 0;
+ if (!j && (i != i_to)) // full sub-block
+- {
+- f.add_full(bm::set_sub_array_size * bm::gap_max_bits);
+- }
++ f.add_full(bm::set_sub_total_bits);
+ else
+ {
+ do
+@@ -1426,13 +1369,10 @@
+ do
+ {
+ if (blk_blk[j])
+- {
+ f(blk_blk[j]);
+- }
+ if ((i == i_to) && (j == j_to))
+ return;
+- ++j;
+- } while (j < bm::set_sub_array_size);
++ } while (++j < bm::set_sub_array_size);
+ }
+ } // for i
+ }
+@@ -1672,7 +1612,7 @@
+ Function returns if function-predicate returns true
+ */
+ template<typename T, typename BI, typename F>
+-bool for_each_nzblock_if(T*** root, BI size1, F& f)
++bool for_each_nzblock_if(T*** root, BI size1, F& f) BMNOEXCEPT
+ {
+ BI block_idx = 0;
+ for (BI i = 0; i < size1; ++i)
+@@ -1754,14 +1694,11 @@
+ /*! Computes SUM of all elements of the sequence
+ */
+ template<typename T>
+-bm::id64_t sum_arr(T* first, T* last)
++bm::id64_t sum_arr(const T* first, const T* last) BMNOEXCEPT
+ {
+ bm::id64_t sum = 0;
+- while (first < last)
+- {
++ for (;first < last; ++first)
+ sum += *first;
+- ++first;
+- }
+ return sum;
+ }
+
+@@ -1775,7 +1712,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_split(const T* buf, T* arr0, T* arr1, T& arr0_cnt, T& arr1_cnt)
++void gap_split(const T* buf,
++ T* arr0, T* arr1, T& arr0_cnt, T& arr1_cnt) BMNOEXCEPT
+ {
+ const T* pcurr = buf;
+ unsigned len = (*pcurr >> 3);
+@@ -1834,7 +1772,7 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_bit_count(const T* buf, unsigned dsize=0)
++unsigned gap_bit_count(const T* buf, unsigned dsize=0) BMNOEXCEPT
+ {
+ const T* pcurr = buf;
+ if (dsize == 0)
+@@ -1850,14 +1788,8 @@
+ bits_counter += *pcurr + 1;
+ ++pcurr;
+ }
+- ++pcurr; // set GAP to 1
+-
+- while (pcurr <= pend)
+- {
++ for (++pcurr; pcurr <= pend; pcurr += 2)
+ bits_counter += *pcurr - *(pcurr-1);
+- pcurr += 2; // jump to the next positive GAP
+- }
+-
+ return bits_counter;
+ }
+
+@@ -1867,7 +1799,8 @@
+ \return Number of non-zero bits.
+ @ingroup gapfunc
+ */
+-template<typename T> unsigned gap_bit_count_unr(const T* buf)
++template<typename T>
++unsigned gap_bit_count_unr(const T* buf) BMNOEXCEPT
+ {
+ const T* pcurr = buf;
+ unsigned dsize = (*pcurr >> 3);
+@@ -1918,7 +1851,7 @@
+ {
+ cnt += *pcurr - *(pcurr - 1);
+ }
+- BM_ASSERT(cnt == gap_bit_count(buf));
++ BM_ASSERT(cnt == bm::gap_bit_count(buf));
+ return cnt;
+ }
+
+@@ -1933,9 +1866,11 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_bit_count_range(const T* const buf, unsigned left, unsigned right)
++unsigned gap_bit_count_range(const T* const buf,
++ unsigned left, unsigned right) BMNOEXCEPT
+ {
+ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::gap_max_bits);
+
+ const T* pcurr = buf;
+ const T* pend = pcurr + (*pcurr >> 3);
+@@ -1966,6 +1901,140 @@
+ }
+
+ /*!
++ \brief Test if all bits are 1 in GAP buffer in the [left, right] range.
++ \param buf - GAP buffer pointer.
++ \param left - leftmost bit index to start from
++ \param right- rightmost bit index
++ \return true if all bits are "11111"
++ @ingroup gapfunc
++*/
++template<typename T>
++bool gap_is_all_one_range(const T* const BMRESTRICT buf,
++ unsigned left, unsigned right) BMNOEXCEPT
++{
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::gap_max_bits);
++
++ unsigned is_set;
++ unsigned start_pos = bm::gap_bfind(buf, left, &is_set);
++ if (!is_set) // GAP is 0
++ return false;
++ const T* const pcurr = buf + start_pos;
++ return (right <= *pcurr);
++}
++
++/*!
++ \brief Test if any bits are 1 in GAP buffer in the [left, right] range.
++ \param buf - GAP buffer pointer.
++ \param left - leftmost bit index to start from
++ \param right- rightmost bit index
++ \return true if at least 1 "00010"
++ @ingroup gapfunc
++*/
++template<typename T>
++bool gap_any_range(const T* const BMRESTRICT buf,
++ unsigned left, unsigned right) BMNOEXCEPT
++{
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::gap_max_bits);
++
++ unsigned is_set;
++ unsigned start_pos = bm::gap_bfind(buf, left, &is_set);
++ const T* const pcurr = buf + start_pos;
++
++ if (!is_set) // start GAP is 0 ...
++ {
++ if (right <= *pcurr) // ...bit if the interval goes into at least 1 blk
++ return false; // .. nope
++ return true;
++ }
++ return true;
++}
++
++/*!
++ \brief Test if any bits are 1 in GAP buffer in the [left, right] range
++ and flanked with 0s
++ \param buf - GAP buffer pointer.
++ \param left - leftmost bit index to start from
++ \param right- rightmost bit index
++ \return true if "011110"
++ @ingroup gapfunc
++*/
++template<typename T>
++bool gap_is_interval(const T* const BMRESTRICT buf,
++ unsigned left, unsigned right) BMNOEXCEPT
++{
++ BM_ASSERT(left <= right);
++ BM_ASSERT(left > 0); // cannot check left-1 otherwise
++ BM_ASSERT(right < bm::gap_max_bits-1); // cannot check right+1 otherwise
++
++ unsigned is_set;
++ unsigned start_pos = bm::gap_bfind(buf, left, &is_set);
++
++ const T* pcurr = buf + start_pos;
++ if (!is_set || (right != *pcurr) || (start_pos <= 1))
++ return false;
++ --pcurr;
++ if (*pcurr != left-1)
++ return false;
++ return true;
++}
++
++/**
++ \brief Searches for the last 1 bit in the 111 interval of a GAP block
++ \param buf - BIT block buffer
++ \param nbit - bit index to start checking from
++ \param pos - [out] found value
++
++ \return false if not found
++ @ingroup gapfunc
++*/
++template<typename T>
++bool gap_find_interval_end(const T* const BMRESTRICT buf,
++ unsigned nbit, unsigned* BMRESTRICT pos) BMNOEXCEPT
++{
++ BM_ASSERT(pos);
++ BM_ASSERT(nbit < bm::gap_max_bits);
++
++ unsigned is_set;
++ unsigned start_pos = bm::gap_bfind(buf, nbit, &is_set);
++ if (!is_set)
++ return false;
++ *pos = buf[start_pos];
++ return true;
++}
++
++
++/**
++ \brief Searches for the first 1 bit in the 111 interval of a GAP block
++ \param buf - BIT block buffer
++ \param nbit - bit index to start checking from
++ \param pos - [out] found value
++
++ \return false if not found
++ @ingroup gapfunc
++*/
++template<typename T>
++bool gap_find_interval_start(const T* const BMRESTRICT buf,
++ unsigned nbit, unsigned* BMRESTRICT pos) BMNOEXCEPT
++{
++ BM_ASSERT(pos);
++ BM_ASSERT(nbit < bm::gap_max_bits);
++
++ unsigned is_set;
++ unsigned start_pos = bm::gap_bfind(buf, nbit, &is_set);
++ if (!is_set)
++ return false;
++ --start_pos;
++ if (!start_pos)
++ *pos = 0;
++ else
++ *pos = buf[start_pos]+1;
++ return true;
++}
++
++
++/*!
+ \brief GAP block find position for the rank
+
+ \param block - bit block buffer pointer
+@@ -1982,7 +2051,7 @@
+ SIZE_TYPE gap_find_rank(const T* const block,
+ SIZE_TYPE rank,
+ unsigned nbit_from,
+- unsigned& nbit_pos)
++ unsigned& nbit_pos) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(rank);
+@@ -2025,11 +2094,14 @@
+ \brief Counts 1 bits in GAP buffer in the closed [0, right] range.
+ \param buf - GAP buffer pointer.
+ \param right- rightmost bit index
+- \return Number of non-zero bits.
++ \param is_corrected - if true the result will be rank corrected
++ if right bit == true count=count-1
++ \return Number of non-zero bits
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_bit_count_to(const T* const buf, T right)
++unsigned gap_bit_count_to(const T* const buf, T right,
++ bool is_corrected=false) BMNOEXCEPT
+ {
+ const T* pcurr = buf;
+ const T* pend = pcurr + (*pcurr >> 3);
+@@ -2042,6 +2114,7 @@
+ if (right <= *pcurr) // we are in the target block right now
+ {
+ bits_counter = (right + 1u) & is_set; // & is_set == if (is_set)
++ bits_counter -= (is_set & unsigned(is_corrected));
+ return bits_counter;
+ }
+ bits_counter += (*pcurr + 1u) & is_set;
+@@ -2051,10 +2124,14 @@
+ {
+ bits_counter += (*pcurr - prev_gap) & is_set;
+ if (pcurr == pend)
++ {
++ bits_counter -= (is_set & unsigned(is_corrected));
+ return bits_counter;
++ }
+ prev_gap = *pcurr++;
+ }
+ bits_counter += (right - prev_gap) & is_set;
++ bits_counter -= (is_set & unsigned(is_corrected));
+ return bits_counter;
+ }
+
+@@ -2110,7 +2187,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-T* gap_2_dgap(const T* gap_buf, T* dgap_buf, bool copy_head=true)
++T* gap_2_dgap(const T* BMRESTRICT gap_buf,
++ T* BMRESTRICT dgap_buf, bool copy_head=true) BMNOEXCEPT
+ {
+ if (copy_head) // copy GAP header
+ {
+@@ -2135,7 +2213,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void dgap_2_gap(const T* dgap_buf, T* gap_buf, T gap_header=0)
++void dgap_2_gap(const T* BMRESTRICT dgap_buf,
++ T* BMRESTRICT gap_buf, T gap_header=0) BMNOEXCEPT
+ {
+ const T* pcurr = dgap_buf;
+ unsigned len;
+@@ -2175,7 +2254,8 @@
+
+ @ingroup gapfunc
+ */
+-template<typename T> int gapcmp(const T* buf1, const T* buf2)
++template<typename T>
++int gapcmp(const T* buf1, const T* buf2) BMNOEXCEPT
+ {
+ const T* pcurr1 = buf1;
+ const T* pend1 = pcurr1 + (*pcurr1 >> 3);
+@@ -2213,9 +2293,7 @@
+ return (bitval1) ? 1 : -1;
+ }
+ }
+-
+ ++pcurr1; ++pcurr2;
+-
+ bitval1 ^= 1;
+ bitval2 ^= 1;
+ }
+@@ -2235,7 +2313,7 @@
+ template<typename T>
+ bool gap_find_first_diff(const T* BMRESTRICT buf1,
+ const T* BMRESTRICT buf2,
+- unsigned* BMRESTRICT pos)
++ unsigned* BMRESTRICT pos) BMNOEXCEPT
+ {
+ BM_ASSERT(buf1 && buf2 && pos);
+
+@@ -2253,8 +2331,9 @@
+ return false;
+ }
+
++// -------------------------------------------------------------------------
++//
+
+-
+ /*!
+ \brief Abstract operation for GAP buffers.
+ Receives functor F as a template argument
+@@ -2264,7 +2343,6 @@
+ can be 0 or 1 (1 inverts the vector)
+ \param vect2 - operand 2 GAP encoded buffer.
+ \param vect2_mask - same as vect1_mask
+- \param f - operation functor.
+ \param dlen - destination length after the operation
+
+ \note Internal function.
+@@ -2278,8 +2356,7 @@
+ unsigned vect1_mask,
+ const T* BMRESTRICT vect2,
+ unsigned vect2_mask,
+- F& f,
+- unsigned& dlen)
++ unsigned& dlen) BMNOEXCEPT2
+ {
+ const T* cur1 = vect1;
+ const T* cur2 = vect2;
+@@ -2287,7 +2364,7 @@
+ T bitval1 = (T)((*cur1++ & 1) ^ vect1_mask);
+ T bitval2 = (T)((*cur2++ & 1) ^ vect2_mask);
+
+- T bitval = (T) f(bitval1, bitval2);
++ T bitval = (T) F::op(bitval1, bitval2);
+ T bitval_prev = bitval;
+
+ T* res = dest;
+@@ -2297,7 +2374,7 @@
+ T c1 = *cur1; T c2 = *cur2;
+ while (1)
+ {
+- bitval = (T) f(bitval1, bitval2);
++ bitval = (T) F::op(bitval1, bitval2);
+
+ // Check if GAP value changes and we need to
+ // start the next one
+@@ -2327,7 +2404,6 @@
+ }
+ ++cur2; c2 = *cur2;
+ }
+-
+ } // while
+
+ dlen = (unsigned)(res - dest);
+@@ -2334,12 +2410,12 @@
+ *dest = (T)((*dest & 7) + (dlen << 3));
+ }
+
++
+ /*!
+ \brief Abstract operation for GAP buffers (predicts legth)
+ Receives functor F as a template argument
+ \param vect1 - operand 1 GAP encoded buffer.
+ \param vect2 - operand 2 GAP encoded buffer.
+- \param f - operation functor.
+ \param dlen - destination length after the operation
+ \param limit - maximum target length limit,
+ returns false if limit is reached
+@@ -2354,9 +2430,8 @@
+ template<typename T, class F>
+ bool gap_buff_dry_op(const T* BMRESTRICT vect1,
+ const T* BMRESTRICT vect2,
+- F& f,
+ unsigned& dlen,
+- unsigned limit)
++ unsigned limit) BMNOEXCEPT2
+ {
+ const T* cur1 = vect1;
+ const T* cur2 = vect2;
+@@ -2364,7 +2439,7 @@
+ T bitval1 = (T)((*cur1++ & 1));
+ T bitval2 = (T)((*cur2++ & 1));
+
+- T bitval = (T) f(bitval1, bitval2);
++ T bitval = (T) F::op(bitval1, bitval2);
+ T bitval_prev = bitval;
+
+ unsigned len = 1;
+@@ -2372,7 +2447,7 @@
+ T c1 = *cur1; T c2 = *cur2;
+ while (1)
+ {
+- bitval = (T) f(bitval1, bitval2);
++ bitval = (T) F::op(bitval1, bitval2);
+
+ // Check if GAP value changes and we need to
+ // start the next one
+@@ -2418,7 +2493,6 @@
+ can be 0 or 1 (1 inverts the vector)
+ \param vect2 - operand 2 GAP encoded buffer.
+ \param vect2_mask - same as vect1_mask
+- \param f - operation functor.
+ \note Internal function.
+ \return non zero value if operation result returns any 1 bit
+
+@@ -2428,8 +2502,7 @@
+ unsigned gap_buff_any_op(const T* BMRESTRICT vect1,
+ unsigned vect1_mask,
+ const T* BMRESTRICT vect2,
+- unsigned vect2_mask,
+- F f)
++ unsigned vect2_mask) BMNOEXCEPT2
+ {
+ const T* cur1 = vect1;
+ const T* cur2 = vect2;
+@@ -2437,7 +2510,7 @@
+ unsigned bitval1 = (*cur1++ & 1) ^ vect1_mask;
+ unsigned bitval2 = (*cur2++ & 1) ^ vect2_mask;
+
+- unsigned bitval = f(bitval1, bitval2);
++ unsigned bitval = F::op(bitval1, bitval2);
+ if (bitval)
+ return bitval;
+ unsigned bitval_prev = bitval;
+@@ -2444,7 +2517,7 @@
+
+ while (1)
+ {
+- bitval = f(bitval1, bitval2);
++ bitval = F::op(bitval1, bitval2);
+ if (bitval)
+ return bitval;
+
+@@ -2468,10 +2541,8 @@
+ {
+ break;
+ }
+-
+ ++cur1;
+- bitval1 ^= 1;
+- bitval2 ^= 1;
++ bitval1 ^= 1; bitval2 ^= 1;
+ }
+ ++cur2;
+ }
+@@ -2488,13 +2559,12 @@
+ Receives functor F as a template argument
+ \param vect1 - operand 1 GAP encoded buffer.
+ \param vect2 - operand 2 GAP encoded buffer.
+- \param f - operation functor.
+ \note Internal function.
+
+ @ingroup gapfunc
+ */
+ template<typename T, class F>
+-unsigned gap_buff_count_op(const T* vect1, const T* vect2, F f)
++unsigned gap_buff_count_op(const T* vect1, const T* vect2) BMNOEXCEPT2
+ {
+ unsigned count;// = 0;
+ const T* cur1 = vect1;
+@@ -2502,18 +2572,15 @@
+
+ unsigned bitval1 = (*cur1++ & 1);
+ unsigned bitval2 = (*cur2++ & 1);
+- unsigned bitval = count = f(bitval1, bitval2);
++ unsigned bitval = count = F::op(bitval1, bitval2);
+ unsigned bitval_prev = bitval;
+
+- //if (bitval) ++count;
+-
+ T res, res_prev;
+ res = res_prev = 0;
+
+ while (1)
+ {
+- bitval = f(bitval1, bitval2);
+-
++ bitval = F::op(bitval1, bitval2);
+ // Check if GAP value changes and we need to
+ // start the next one.
+ if (bitval != bitval_prev)
+@@ -2530,8 +2597,7 @@
+ count += res - res_prev;
+ res_prev = res;
+ }
+- ++cur1;
+- bitval1 ^= 1;
++ ++cur1; bitval1 ^= 1;
+ }
+ else // >=
+ {
+@@ -2548,13 +2614,10 @@
+ else // equal
+ {
+ if (*cur2 == (bm::gap_max_bits - 1))
+- {
+ break;
+- }
+
+ ++cur1;
+- bitval1 ^= 1;
+- bitval2 ^= 1;
++ bitval1 ^= 1; bitval2 ^= 1;
+ }
+ ++cur2;
+ }
+@@ -2565,6 +2628,10 @@
+ }
+
+
++#ifdef __GNUG__
++#pragma GCC diagnostic push
++#pragma GCC diagnostic ignored "-Wconversion"
++#endif
+
+ /*!
+ \brief Sets or clears bit in the GAP buffer.
+@@ -2582,11 +2649,11 @@
+ unsigned gap_set_value(unsigned val,
+ T* BMRESTRICT buf,
+ unsigned pos,
+- unsigned* BMRESTRICT is_set)
++ unsigned* BMRESTRICT is_set) BMNOEXCEPT
+ {
+ BM_ASSERT(pos < bm::gap_max_bits);
+- unsigned curr = gap_bfind(buf, pos, is_set);
+
++ unsigned curr = bm::gap_bfind(buf, pos, is_set);
+ T end = (T)(*buf >> 3);
+ if (*is_set == val)
+ {
+@@ -2601,10 +2668,10 @@
+
+ // Special case, first bit GAP operation. There is no platform beside it.
+ // initial flag must be inverted.
+- if (pos == 0)
++ if (!pos)
+ {
+ *buf ^= 1;
+- if ( buf[1] ) // We need to insert a 1 bit platform here.
++ if (buf[1]) // We need to insert a 1 bit GAP here
+ {
+ ::memmove(&buf[2], &buf[1], (end - 1) * sizeof(gap_word_t));
+ buf[1] = 0;
+@@ -2612,52 +2679,125 @@
+ }
+ else // Only 1 bit in the GAP. We need to delete the first GAP.
+ {
+- pprev = buf + 1;
+- pcurr = pprev + 1;
+- do
+- {
+- *pprev++ = *pcurr++;
+- } while (pcurr < pend);
+- --end;
++ pprev = buf + 1; pcurr = pprev + 1;
++ goto copy_gaps;
+ }
+ }
+- else if (curr > 1 && ((unsigned)(*pprev))+1 == pos) // Left border bit
++ else
++ if (curr > 1 && ((unsigned)(*pprev))+1 == pos) // Left border bit
+ {
+ ++(*pprev);
+ if (*pprev == *pcurr) // Curr. GAP to be merged with prev.GAP.
+ {
+ --end;
+- if (pcurr != pend) // GAP merge: 2 GAPS to be deleted
++ if (pcurr != pend) // GAP merge: 2 GAPS to be deleted
+ {
++ ++pcurr;
++ copy_gaps:
+ --end;
+- ++pcurr;
+- do
+- {
+- *pprev++ = *pcurr++;
+- } while (pcurr < pend);
++ do { *pprev++ = *pcurr++; } while (pcurr < pend);
+ }
+ }
+ }
+- else if (*pcurr == pos) // Rightmost bit in the GAP. Border goes left.
++ else
++ if (*pcurr == pos) // Rightmost bit in the GAP. Border goes left.
+ {
+- --(*pcurr);
+- if (pcurr == pend)
++ --(*pcurr);
++ end += (pcurr == pend);
++ }
++ else // Worst case: split current GAP
++ {
++ if (*pcurr != bm::gap_max_bits-1) // last gap does not need memmove
++ ::memmove(pcurr+2, pcurr, (end - curr + 1)*(sizeof(T)));
++ end += 2;
++ pcurr[0] = (T)(pos-1);
++ pcurr[1] = (T)pos;
++ }
++
++ // Set correct length word and last border word
++ *buf = (T)((*buf & 7) + (end << 3));
++ buf[end] = bm::gap_max_bits-1;
++ return end;
++}
++
++/*!
++ \brief Sets or clears bit in the GAP buffer.
++
++ \param val - new bit value
++ \param buf - GAP buffer.
++ \param pos - Index of bit to set.
++
++ \return New GAP buffer length.
++
++ @ingroup gapfunc
++*/
++template<typename T>
++unsigned gap_set_value(unsigned val,
++ T* BMRESTRICT buf,
++ unsigned pos) BMNOEXCEPT
++{
++ BM_ASSERT(pos < bm::gap_max_bits);
++ unsigned is_set;
++ unsigned curr = bm::gap_bfind(buf, pos, &is_set);
++ T end = (T)(*buf >> 3);
++ if (is_set == val)
++ return end;
++
++ T* pcurr = buf + curr;
++ T* pprev = pcurr - 1;
++ T* pend = buf + end;
++
++ // Special case, first bit GAP operation. There is no platform beside it.
++ // initial flag must be inverted.
++ if (!pos)
++ {
++ *buf ^= 1;
++ if (buf[1]) // We need to insert a 1 bit GAP here
+ {
+- ++end;
++ ::memmove(&buf[2], &buf[1], (end - 1) * sizeof(gap_word_t));
++ buf[1] = 0;
++ ++end;
+ }
++ else // Only 1 bit in the GAP. We need to delete the first GAP.
++ {
++ pprev = buf + 1; pcurr = pprev + 1;
++ goto copy_gaps;
++ }
+ }
+- else // Worst case we need to split current block.
++ else
++ if (curr > 1 && ((unsigned)(*pprev))+1 == pos) // Left border bit
+ {
+- ::memmove(pcurr+2, pcurr,(end - curr + 1)*sizeof(T));
+- *pcurr++ = (T)(pos - 1);
+- *pcurr = (T)pos;
+- end = (T)(end + 2);
++ ++(*pprev);
++ if (*pprev == *pcurr) // Curr. GAP to be merged with prev.GAP.
++ {
++ --end;
++ if (pcurr != pend) // GAP merge: 2 GAPS to be deleted
++ {
++ ++pcurr;
++ copy_gaps:
++ --end;
++ do { *pprev++ = *pcurr++; } while (pcurr < pend);
++ }
++ }
+ }
++ else
++ if (*pcurr == pos) // Rightmost bit in the GAP. Border goes left.
++ {
++ --(*pcurr);
++ end += (pcurr == pend);
++ }
++ else // Worst case: split current GAP
++ {
++ if (*pcurr != bm::gap_max_bits-1) // last gap does not need memmove
++ ::memmove(pcurr+2, pcurr, (end - curr + 1)*(sizeof(T)));
++ end += 2;
++ pcurr[0] = (T)(pos-1);
++ pcurr[1] = (T)pos;
++ }
+
+- // Set correct length word.
++ // Set correct length word and last border word
+ *buf = (T)((*buf & 7) + (end << 3));
+-
+- buf[end] = bm::gap_max_bits - 1;
++ buf[end] = bm::gap_max_bits-1;
+ return end;
+ }
+
+@@ -2672,7 +2812,7 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_add_value(T* buf, unsigned pos)
++unsigned gap_add_value(T* buf, unsigned pos) BMNOEXCEPT
+ {
+ BM_ASSERT(pos < bm::gap_max_bits);
+
+@@ -2684,7 +2824,7 @@
+
+ // Special case, first bit GAP operation. There is no platform beside it.
+ // initial flag must be inverted.
+- if (pos == 0)
++ if (!pos)
+ {
+ *buf ^= 1;
+ if ( buf[1] ) // We need to insert a 1 bit platform here.
+@@ -2695,13 +2835,9 @@
+ }
+ else // Only 1 bit in the GAP. We need to delete the first GAP.
+ {
+- pprev = buf + 1;
+- pcurr = pprev + 1;
+- do
+- {
+- *pprev++ = *pcurr++;
+- } while (pcurr < pend);
++ pprev = buf + 1; pcurr = pprev + 1;
+ --end;
++ do { *pprev++ = *pcurr++; } while (pcurr < pend);
+ }
+ }
+ else if (((unsigned)(*pprev))+1 == pos && (curr > 1) ) // Left border bit
+@@ -2710,40 +2846,32 @@
+ if (*pprev == *pcurr) // Curr. GAP to be merged with prev.GAP.
+ {
+ --end;
+- if (pcurr != pend) // GAP merge: 2 GAPS to be deleted
+- {
+- // TODO: should never get here...
+- --end;
+- ++pcurr;
+- do
+- {
+- *pprev++ = *pcurr++;
+- } while (pcurr < pend);
+- }
+- }
++ BM_ASSERT(pcurr == pend);
++ }
+ }
+ else if (*pcurr == pos) // Rightmost bit in the GAP. Border goes left.
+ {
+ --(*pcurr);
+- if (pcurr == pend)
+- {
+- ++end;
+- }
++ end += (pcurr == pend);
+ }
+ else // Worst case we need to split current block.
+ {
+- *pcurr++ = (T)(pos - 1);
+- *pcurr = (T)pos;
++ pcurr[0] = (T)(pos-1);
++ pcurr[1] = (T)pos;
+ end = (T)(end+2);
+ }
+
+ // Set correct length word.
+ *buf = (T)((*buf & 7) + (end << 3));
+-
+ buf[end] = bm::gap_max_bits - 1;
+ return end;
+ }
+
++#ifdef __GNUG__
++#pragma GCC diagnostic pop
++#endif
++
++
+ /*!
+ @brief Right shift GAP block by 1 bit
+ @param buf - block pointer
+@@ -2754,7 +2882,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bool gap_shift_r1(T* buf, unsigned co_flag, unsigned* new_len)
++bool gap_shift_r1(T* BMRESTRICT buf,
++ unsigned co_flag, unsigned* BMRESTRICT new_len) BMNOEXCEPT
+ {
+ BM_ASSERT(new_len);
+ bool co;
+@@ -2802,7 +2931,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bool gap_shift_l1(T* buf, unsigned co_flag, unsigned* new_len)
++bool gap_shift_l1(T* BMRESTRICT buf,
++ unsigned co_flag, unsigned* BMRESTRICT new_len) BMNOEXCEPT
+ {
+ BM_ASSERT(new_len);
+ unsigned is_set;
+@@ -2860,7 +2990,7 @@
+ */
+
+ template<typename T>
+-unsigned gap_set_array(T* buf, const T* arr, unsigned len)
++unsigned gap_set_array(T* buf, const T* arr, unsigned len) BMNOEXCEPT
+ {
+ *buf = (T)((*buf & 6u) + (1u << 3)); // gap header setup
+
+@@ -2921,8 +3051,7 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned bit_array_compute_gaps(const T* arr,
+- unsigned len)
++unsigned bit_array_compute_gaps(const T* arr, unsigned len) BMNOEXCEPT
+ {
+ unsigned gap_count = 1;
+ T prev = arr[0];
+@@ -2954,9 +3083,9 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned gap_block_find(const T* buf,
++unsigned gap_block_find(const T* BMRESTRICT buf,
+ unsigned nbit,
+- bm::id_t* prev)
++ bm::id_t* BMRESTRICT prev) BMNOEXCEPT
+ {
+ BM_ASSERT(nbit < bm::gap_max_bits);
+
+@@ -2968,19 +3097,20 @@
+ *prev = nbit;
+ return 1u;
+ }
+-
+ unsigned val = buf[gap_idx] + 1;
+ *prev = val;
+-
+ return (val != bm::gap_max_bits); // no bug here.
+ }
+
++//------------------------------------------------------------------------
++
++
+ /*!
+ \brief Set 1 bit in a block
+ @ingroup bitfunc
+ */
+ BMFORCEINLINE
+-void set_bit(unsigned* dest, unsigned bitpos)
++void set_bit(unsigned* dest, unsigned bitpos) BMNOEXCEPT
+ {
+ unsigned nbit = unsigned(bitpos & bm::set_block_mask);
+ unsigned nword = unsigned(nbit >> bm::set_word_shift);
+@@ -2993,7 +3123,7 @@
+ @ingroup bitfunc
+ */
+ BMFORCEINLINE
+-void clear_bit(unsigned* dest, unsigned bitpos)
++void clear_bit(unsigned* dest, unsigned bitpos) BMNOEXCEPT
+ {
+ unsigned nbit = unsigned(bitpos & bm::set_block_mask);
+ unsigned nword = unsigned(nbit >> bm::set_word_shift);
+@@ -3007,7 +3137,7 @@
+ @ingroup bitfunc
+ */
+ BMFORCEINLINE
+-unsigned test_bit(const unsigned* block, unsigned bitpos)
++unsigned test_bit(const unsigned* block, unsigned bitpos) BMNOEXCEPT
+ {
+ unsigned nbit = unsigned(bitpos & bm::set_block_mask);
+ unsigned nword = unsigned(nbit >> bm::set_word_shift);
+@@ -3025,7 +3155,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-void or_bit_block(unsigned* dest, unsigned bitpos, unsigned bitcount)
++void or_bit_block(unsigned* dest, unsigned bitpos, unsigned bitcount) BMNOEXCEPT
+ {
+ const unsigned maskFF = ~0u;
+
+@@ -3072,7 +3202,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-void sub_bit_block(unsigned* dest, unsigned bitpos, unsigned bitcount)
++void sub_bit_block(unsigned* dest, unsigned bitpos, unsigned bitcount) BMNOEXCEPT
+ {
+ const unsigned maskFF = ~0u;
+
+@@ -3121,7 +3251,7 @@
+ */
+ inline void xor_bit_block(unsigned* dest,
+ unsigned bitpos,
+- unsigned bitcount)
++ unsigned bitcount) BMNOEXCEPT
+ {
+ unsigned nbit = unsigned(bitpos & bm::set_block_mask);
+ unsigned nword = unsigned(nbit >> bm::set_word_shift);
+@@ -3175,7 +3305,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_sub_to_bitset(unsigned* dest, const T* pcurr)
++void gap_sub_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT pcurr) BMNOEXCEPT
+ {
+ BM_ASSERT(dest && pcurr);
+
+@@ -3203,7 +3334,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_sub_to_bitset(unsigned* dest, const T* pcurr, bm::id64_t digest0)
++void gap_sub_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT pcurr, bm::id64_t digest0) BMNOEXCEPT
+ {
+ BM_ASSERT(dest && pcurr);
+
+@@ -3261,7 +3393,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_xor_to_bitset(unsigned* dest, const T* pcurr)
++void gap_xor_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT pcurr) BMNOEXCEPT
+ {
+ BM_ASSERT(dest && pcurr);
+
+@@ -3288,7 +3421,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_add_to_bitset(unsigned* dest, const T* pcurr, unsigned len)
++void gap_add_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT pcurr, unsigned len) BMNOEXCEPT
+ {
+ BM_ASSERT(dest && pcurr);
+
+@@ -3321,7 +3455,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_add_to_bitset(unsigned* dest, const T* pcurr)
++void gap_add_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT pcurr) BMNOEXCEPT
+ {
+ unsigned len = (*pcurr >> 3);
+ gap_add_to_bitset(dest, pcurr, len);
+@@ -3336,7 +3471,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_and_to_bitset(unsigned* dest, const T* pcurr)
++void gap_and_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT pcurr) BMNOEXCEPT
+ {
+ BM_ASSERT(dest && pcurr);
+
+@@ -3370,7 +3506,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_and_to_bitset(unsigned* dest, const T* pcurr, bm::id64_t digest0)
++void gap_and_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT pcurr, bm::id64_t digest0) BMNOEXCEPT
+ {
+ BM_ASSERT(dest && pcurr);
+ if (!digest0)
+@@ -3431,7 +3568,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_and_count(const unsigned* block, const T* pcurr)
++bm::id_t gap_bitset_and_count(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT pcurr) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ const T* pend = pcurr + (*pcurr >> 3);
+@@ -3458,7 +3596,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_and_any(const unsigned* block, const T* pcurr)
++bm::id_t gap_bitset_and_any(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT pcurr) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+
+@@ -3487,7 +3626,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_sub_count(const unsigned* block, const T* buf)
++bm::id_t gap_bitset_sub_count(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+
+@@ -3521,7 +3661,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_sub_any(const unsigned* block, const T* buf)
++bm::id_t gap_bitset_sub_any(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+
+@@ -3558,7 +3699,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_xor_count(const unsigned* block, const T* buf)
++bm::id_t gap_bitset_xor_count(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+
+@@ -3595,7 +3737,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_xor_any(const unsigned* block, const T* buf)
++bm::id_t gap_bitset_xor_any(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+
+@@ -3632,10 +3775,10 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_or_count(const unsigned* block, const T* buf)
++bm::id_t gap_bitset_or_count(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+-
+ const T* pcurr = buf;
+ const T* pend = pcurr + (*pcurr >> 3);
+ ++pcurr;
+@@ -3664,7 +3807,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-bm::id_t gap_bitset_or_any(const unsigned* block, const T* buf)
++bm::id_t gap_bitset_or_any(const unsigned* BMRESTRICT block,
++ const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ bool b = !bm::gap_is_all_zero(buf) ||
+ !bm::bit_is_all_zero(block);
+@@ -3682,7 +3826,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-void bit_block_set(bm::word_t* BMRESTRICT dst, bm::word_t value)
++void bit_block_set(bm::word_t* BMRESTRICT dst, bm::word_t value) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ VECT_SET_BLOCK(dst, value);
+@@ -3700,7 +3844,8 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void gap_convert_to_bitset(unsigned* dest, const T* buf)
++void gap_convert_to_bitset(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT buf) BMNOEXCEPT
+ {
+ bm::bit_block_set(dest, 0);
+ bm::gap_add_to_bitset(dest, buf);
+@@ -3721,13 +3866,12 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-unsigned* gap_convert_to_bitset_smart(unsigned* dest,
+- const T* buf,
+- id_t set_max)
++unsigned* gap_convert_to_bitset_smart(unsigned* BMRESTRICT dest,
++ const T* BMRESTRICT buf,
++ id_t set_max) BMNOEXCEPT
+ {
+ if (buf[1] == set_max - 1)
+ return (buf[0] & 1) ? FULL_BLOCK_REAL_ADDR : 0;
+-
+ bm::gap_convert_to_bitset(dest, buf);
+ return dest;
+ }
+@@ -3742,7 +3886,8 @@
+ @ingroup gapfunc
+ @internal
+ */
+-template<typename T> unsigned gap_control_sum(const T* buf)
++template<typename T>
++unsigned gap_control_sum(const T* buf) BMNOEXCEPT
+ {
+ unsigned end = *buf >> 3;
+
+@@ -3755,7 +3900,6 @@
+ ++pcurr;
+ }
+ ++pcurr; // now we are in GAP "1" again
+-
+ while (pcurr <= pend)
+ {
+ BM_ASSERT(*pcurr > *(pcurr-1));
+@@ -3773,9 +3917,8 @@
+
+ @ingroup gapfunc
+ */
+-template<class T> void gap_set_all(T* buf,
+- unsigned set_max,
+- unsigned value)
++template<class T>
++void gap_set_all(T* buf, unsigned set_max, unsigned value) BMNOEXCEPT
+ {
+ BM_ASSERT(value == 0 || value == 1);
+ *buf = (T)((*buf & 6u) + (1u << 3) + value);
+@@ -3796,8 +3939,7 @@
+ void gap_init_range_block(T* buf,
+ T from,
+ T to,
+- T value)
+- //unsigned set_max)
++ T value) BMNOEXCEPT
+ {
+ BM_ASSERT(value == 0 || value == 1);
+ const unsigned set_max = bm::bits_in_block;
+@@ -3844,7 +3986,7 @@
+
+ @ingroup gapfunc
+ */
+-template<typename T> void gap_invert(T* buf)
++template<typename T> void gap_invert(T* buf) BMNOEXCEPT
+ {
+ *buf ^= 1;
+ }
+@@ -3863,7 +4005,7 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-void set_gap_level(T* buf, int level)
++void set_gap_level(T* buf, int level) BMNOEXCEPT
+ {
+ BM_ASSERT(level >= 0);
+ BM_ASSERT(unsigned(level) < bm::gap_levels);
+@@ -3885,7 +4027,7 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-inline int gap_calc_level(unsigned len, const T* glevel_len)
++int gap_calc_level(unsigned len, const T* glevel_len) BMNOEXCEPT
+ {
+ if (len <= unsigned(glevel_len[0]-4)) return 0;
+ if (len <= unsigned(glevel_len[1]-4)) return 1;
+@@ -3906,10 +4048,11 @@
+ @ingroup gapfunc
+ */
+ template<typename T>
+-inline unsigned gap_free_elements(const T* buf, const T* glevel_len)
++inline unsigned gap_free_elements(const T* BMRESTRICT buf,
++ const T* BMRESTRICT glevel_len) BMNOEXCEPT
+ {
+- unsigned len = gap_length(buf);
+- unsigned capacity = gap_capacity(buf, glevel_len);
++ unsigned len = bm::gap_length(buf);
++ unsigned capacity = bm::gap_capacity(buf, glevel_len);
+ return capacity - len;
+ }
+
+@@ -3923,7 +4066,7 @@
+ @ingroup bitfunc
+ */
+ template<typename T>
+-int bitcmp(const T* buf1, const T* buf2, unsigned len)
++int bitcmp(const T* buf1, const T* buf2, unsigned len) BMNOEXCEPT
+ {
+ BM_ASSERT(len);
+ const T* pend1 = buf1 + len;
+@@ -3948,8 +4091,9 @@
+ @ingroup bitfunc
+ */
+ inline
+-bool bit_find_first_diff(const bm::word_t* blk1, const bm::word_t* blk2,
+- unsigned* pos)
++bool bit_find_first_diff(const bm::word_t* BMRESTRICT blk1,
++ const bm::word_t* BMRESTRICT blk2,
++ unsigned* BMRESTRICT pos) BMNOEXCEPT
+ {
+ BM_ASSERT(blk1 && blk2 && pos);
+ #ifdef VECT_BIT_FIND_DIFF
+@@ -3969,7 +4113,7 @@
+ if (diff)
+ {
+ unsigned idx = bm::count_trailing_zeros_u64(diff);
+- *pos = unsigned(idx + (i * 8u * sizeof(bm::wordop_t)));
++ *pos = unsigned(idx + (i * 8u * unsigned(sizeof(bm::wordop_t))));
+ return true;
+ }
+ } // for
+@@ -3997,7 +4141,7 @@
+ \brief Converts bit block to GAP.
+ \param dest - Destinatio GAP buffer.
+ \param block - Source bitblock buffer.
+- \param dest_len - length of the dest. buffer.
++ \param dest_len - length of the destination buffer.
+ \return New length of GAP block or 0 if conversion failed
+ (insufficicent space).
+
+@@ -4006,7 +4150,7 @@
+ inline
+ unsigned bit_block_to_gap(gap_word_t* BMRESTRICT dest,
+ const unsigned* BMRESTRICT block,
+- unsigned dest_len)
++ unsigned dest_len) BMNOEXCEPT
+ {
+ const unsigned* BMRESTRICT block_end = block + bm::set_block_size;
+ gap_word_t* BMRESTRICT pcurr = dest;
+@@ -4083,10 +4227,15 @@
+ }
+ #endif
+
++/**
++ Convert bit block to GAP representation
++ @internal
++ @ingroup bitfunc
++*/
+ inline
+ unsigned bit_to_gap(gap_word_t* BMRESTRICT dest,
+ const unsigned* BMRESTRICT block,
+- unsigned dest_len)
++ unsigned dest_len) BMNOEXCEPT
+ {
+ #if defined(VECT_BIT_TO_GAP)
+ return VECT_BIT_TO_GAP(dest, block, dest_len);
+@@ -4159,10 +4308,10 @@
+ D gap_convert_to_arr(D* BMRESTRICT dest,
+ const T* BMRESTRICT buf,
+ unsigned dest_len,
+- bool invert = false)
++ bool invert = false) BMNOEXCEPT
+ {
+- BMREGISTER const T* BMRESTRICT pcurr = buf;
+- BMREGISTER const T* pend = pcurr + (*pcurr >> 3);
++ const T* BMRESTRICT pcurr = buf;
++ const T* pend = pcurr + (*pcurr >> 3);
+
+ D* BMRESTRICT dest_curr = dest;
+ ++pcurr;
+@@ -4215,7 +4364,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-bm::id_t bit_block_count(const bm::word_t* block)
++bm::id_t bit_block_count(const bm::word_t* block) BMNOEXCEPT
+ {
+ const bm::word_t* block_end = block + bm::set_block_size;
+ bm::id_t count = 0;
+@@ -4278,8 +4427,12 @@
+ @ingroup bitfunc
+ */
+ inline
+-bm::id_t bit_block_count(const bm::word_t* const block, bm::id64_t digest)
++bm::id_t bit_block_count(const bm::word_t* const block,
++ bm::id64_t digest) BMNOEXCEPT
+ {
++#ifdef VECT_BIT_COUNT_DIGEST
++ return VECT_BIT_COUNT_DIGEST(block, digest);
++#else
+ bm::id_t count = 0;
+ bm::id64_t d = digest;
+ while (d)
+@@ -4304,6 +4457,7 @@
+ d = bm::bmi_bslr_u64(d); // d &= d - 1;
+ } // while
+ return count;
++#endif
+ }
+
+
+@@ -4318,7 +4472,7 @@
+ */
+ inline
+ bm::id_t bit_block_calc_count(const bm::word_t* block,
+- const bm::word_t* block_end)
++ const bm::word_t* block_end) BMNOEXCEPT
+ {
+ bm::id_t count = 0;
+ bm::word_t acc = *block++;
+@@ -4352,7 +4506,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-bm::id_t bit_count_change(bm::word_t w)
++bm::id_t bit_count_change(bm::word_t w) BMNOEXCEPT
+ {
+ unsigned count = 1;
+ w ^= (w >> 1);
+@@ -4368,7 +4522,7 @@
+ @internal
+ */
+ inline
+-unsigned bit_block_change32(const bm::word_t* block, unsigned size)
++unsigned bit_block_change32(const bm::word_t* block, unsigned size) BMNOEXCEPT
+ {
+ unsigned gap_count = 1;
+
+@@ -4416,7 +4570,8 @@
+ @internal
+ */
+ inline
+-void bit_block_change_bc(const bm::word_t* block, unsigned* gc, unsigned* bc)
++void bit_block_change_bc(const bm::word_t* BMRESTRICT block,
++ unsigned* BMRESTRICT gc, unsigned* BMRESTRICT bc) BMNOEXCEPT
+ {
+ BM_ASSERT(gc);
+ BM_ASSERT(bc);
+@@ -4441,7 +4596,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-unsigned bit_block_calc_change(const bm::word_t* block)
++unsigned bit_block_calc_change(const bm::word_t* block) BMNOEXCEPT
+ {
+ #if defined(VECT_BLOCK_CHANGE)
+ return VECT_BLOCK_CHANGE(block, bm::set_block_size);
+@@ -4450,8 +4605,80 @@
+ #endif
+ }
+
++/*!
++ Check if all bits are 1 in [left, right] range
++ @ingroup bitfunc
++*/
++inline
++bool bit_block_is_all_one_range(const bm::word_t* const BMRESTRICT block,
++ bm::word_t left,
++ bm::word_t right) BMNOEXCEPT
++{
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right <= bm::gap_max_bits-1);
+
++ unsigned nword, nbit, bitcount, temp;
++ nbit = left & bm::set_word_mask;
++ const bm::word_t* word =
++ block + (nword = unsigned(left >> bm::set_word_shift));
++ if (left == right) // special case (only 1 bit to check)
++ return (*word >> nbit) & 1u;
+
++ if (nbit) // starting position is not aligned
++ {
++ unsigned right_margin = nbit + right - left;
++ if (right_margin < 32)
++ {
++ unsigned mask =
++ block_set_table<true>::_right[nbit] &
++ block_set_table<true>::_left[right_margin];
++ return mask == (*word & mask);
++ }
++ temp = *word & block_set_table<true>::_right[nbit];
++ if (temp != block_set_table<true>::_right[nbit])
++ return false;
++ bitcount = (right - left + 1u) - (32 - nbit);
++ ++word;
++ }
++ else
++ {
++ bitcount = right - left + 1u;
++ }
++
++ // now when we are word aligned, we can scan the bit-stream
++ const bm::id64_t maskFF64 = ~0ull;
++ const bm::word_t maskFF = ~0u;
++ // loop unrolled to evaluate 4 words at a time
++ // SIMD showed no advantage, unless evaluate sub-wave intervals
++ //
++ for ( ;bitcount >= 128; bitcount-=128, word+=4)
++ {
++ bm::id64_t w64_0 = bm::id64_t(word[0]) + (bm::id64_t(word[1]) << 32);
++ bm::id64_t w64_1 = bm::id64_t(word[2]) + (bm::id64_t(word[3]) << 32);
++ if ((w64_0 ^ maskFF64) | (w64_1 ^ maskFF64))
++ return false;
++ } // for
++
++ for ( ;bitcount >= 32; bitcount-=32, ++word)
++ {
++ if (*word != maskFF)
++ return false;
++ } // for
++ BM_ASSERT(bitcount < 32);
++
++ if (bitcount) // we have a tail to count
++ {
++ temp = *word & block_set_table<true>::_left[bitcount-1];
++ if (temp != block_set_table<true>::_left[bitcount-1])
++ return false;
++ }
++
++ return true;
++}
++
++
++
++
+ /*!
+ Function calculates number of 1 bits in the given array of words in
+ the range between left anf right bits (borders included)
+@@ -4462,7 +4689,7 @@
+ inline
+ bm::id_t bit_block_calc_count_range(const bm::word_t* block,
+ bm::word_t left,
+- bm::word_t right)
++ bm::word_t right) BMNOEXCEPT
+ {
+ BM_ASSERT(left <= right);
+ BM_ASSERT(right <= bm::gap_max_bits-1);
+@@ -4530,7 +4757,7 @@
+ */
+ inline
+ bm::id_t bit_block_calc_count_to(const bm::word_t* block,
+- bm::word_t right)
++ bm::word_t right) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ if (!right) // special case, first bit check
+@@ -4586,7 +4813,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-void bit_block_rotate_left_1(bm::word_t* block)
++void bit_block_rotate_left_1(bm::word_t* block) BMNOEXCEPT
+ {
+ bm::word_t co_flag = (block[0] >> 31) & 1; // carry over bit
+ for (unsigned i = 0; i < bm::set_block_size-1; ++i)
+@@ -4602,7 +4829,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-void bit_block_rotate_left_1_unr(bm::word_t* block)
++void bit_block_rotate_left_1_unr(bm::word_t* block) BMNOEXCEPT
+ {
+ bm::word_t co_flag = (block[0] >> 31) & 1; // carry over bit
+ const unsigned unroll_factor = 4;
+@@ -4638,7 +4865,8 @@
+ @ingroup bitfunc
+ */
+ inline
+-bm::word_t bit_block_insert(bm::word_t* block, unsigned bitpos, bool value)
++bm::word_t bit_block_insert(bm::word_t* BMRESTRICT block,
++ unsigned bitpos, bool value) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(bitpos < 65536);
+@@ -4686,8 +4914,9 @@
+ @ingroup bitfunc
+ */
+ inline
+-bool bit_block_shift_r1(bm::word_t* block,
+- bm::word_t* empty_acc, bm::word_t co_flag)
++bool bit_block_shift_r1(bm::word_t* BMRESTRICT block,
++ bm::word_t* BMRESTRICT empty_acc,
++ bm::word_t co_flag) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(empty_acc);
+@@ -4715,8 +4944,9 @@
+ @ingroup bitfunc
+ */
+ inline
+-bool bit_block_shift_r1_unr(bm::word_t* block,
+- bm::word_t* empty_acc, bm::word_t co_flag)
++bool bit_block_shift_r1_unr(bm::word_t* BMRESTRICT block,
++ bm::word_t* BMRESTRICT empty_acc,
++ bm::word_t co_flag) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(empty_acc);
+@@ -4740,7 +4970,7 @@
+ */
+ inline
+ bool bit_block_shift_l1(bm::word_t* block,
+- bm::word_t* empty_acc, bm::word_t co_flag)
++ bm::word_t* empty_acc, bm::word_t co_flag) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(empty_acc);
+@@ -4770,7 +5000,8 @@
+ */
+ inline
+ bool bit_block_shift_l1_unr(bm::word_t* block,
+- bm::word_t* empty_acc, bm::word_t co_flag)
++ bm::word_t* empty_acc,
++ bm::word_t co_flag) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(empty_acc);
+@@ -4791,7 +5022,9 @@
+ @ingroup bitfunc
+ */
+ inline
+-void bit_block_erase(bm::word_t* block, unsigned bitpos, bool carry_over)
++void bit_block_erase(bm::word_t* block,
++ unsigned bitpos,
++ bool carry_over) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(bitpos < 65536);
+@@ -4799,7 +5032,7 @@
+ if (!bitpos)
+ {
+ bm::word_t acc;
+- bit_block_shift_l1_unr(block, &acc, carry_over);
++ bm::bit_block_shift_l1_unr(block, &acc, carry_over);
+ return;
+ }
+
+@@ -4848,7 +5081,7 @@
+ bool bit_block_shift_r1_and(bm::word_t* BMRESTRICT block,
+ bm::word_t co_flag,
+ const bm::word_t* BMRESTRICT mask_block,
+- bm::id64_t* BMRESTRICT digest)
++ bm::id64_t* BMRESTRICT digest) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(mask_block);
+@@ -4896,7 +5129,7 @@
+
+ block[d_base] = co_flag & mask_block[d_base];
+ if (block[d_base])
+- d |= dmask; // update d
++ d |= dmask; // update digest
+ co_flag = 0;
+ }
+ }
+@@ -4920,7 +5153,7 @@
+ bool bit_block_shift_r1_and_unr(bm::word_t* BMRESTRICT block,
+ bm::word_t co_flag,
+ const bm::word_t* BMRESTRICT mask_block,
+- bm::id64_t* BMRESTRICT digest)
++ bm::id64_t* BMRESTRICT digest) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(mask_block);
+@@ -4942,9 +5175,9 @@
+ @ingroup bitfunc
+ */
+ inline
+-bm::id_t bit_block_any_range(const bm::word_t* block,
++bm::id_t bit_block_any_range(const bm::word_t* const BMRESTRICT block,
+ bm::word_t left,
+- bm::word_t right)
++ bm::word_t right) BMNOEXCEPT
+ {
+ BM_ASSERT(left <= right);
+
+@@ -4969,8 +5202,7 @@
+ unsigned mask =
+ block_set_table<true>::_right[nbit] &
+ block_set_table<true>::_left[right_margin];
+- acc = *word & mask;
+- return acc;
++ return *word & mask;
+ }
+ else
+ {
+@@ -4982,22 +5214,26 @@
+ ++word;
+ }
+
+- // now when we are word aligned, we can check bits the usual way
+- for ( ;bitcount >= 32; bitcount -= 32)
++ // loop unrolled to evaluate 4 words at a time
++ // SIMD showed no advantage, unless evaluate sub-wave intervals
++ //
++ for ( ;bitcount >= 128; bitcount-=128, word+=4)
+ {
+- acc = *word++;
+- if (acc)
++ acc = word[0] | word[1] | word[2] | word[3];
++ if (acc)
+ return acc;
+- }
++ } // for
+
+- if (bitcount) // we have a tail to count
++ acc = 0;
++ for ( ;bitcount >= 32; bitcount -= 32)
+ {
+- acc = (*word) & block_set_table<true>::_left[bitcount-1];
+- if (acc)
+- return acc;
+- }
++ acc |= *word++;
++ } // for
+
+- return 0;
++ if (bitcount) // we have a tail to count
++ acc |= (*word) & block_set_table<true>::_left[bitcount-1];
++
++ return acc;
+ }
+
+ // ----------------------------------------------------------------------
+@@ -5005,7 +5241,8 @@
+ /*! Function inverts block of bits
+ @ingroup bitfunc
+ */
+-template<typename T> void bit_invert(T* start)
++template<typename T>
++void bit_invert(T* start) BMNOEXCEPT
+ {
+ BM_ASSERT(IS_VALID_ADDR((bm::word_t*)start));
+ #ifdef BMVECTOPT
+@@ -5029,7 +5266,7 @@
+ @ingroup bitfunc
+ */
+ inline
+-bool is_bits_one(const bm::wordop_t* start)
++bool is_bits_one(const bm::wordop_t* start) BMNOEXCEPT
+ {
+ #if defined(BMSSE42OPT) || defined(BMAVX2OPT)
+ return VECT_IS_ONE_BLOCK(start);
+@@ -5050,35 +5287,311 @@
+
+ // ----------------------------------------------------------------------
+
+-// GAP blocks manipulation functions:
++/*! @brief Returns "true" if all bits are 1 in the block [left, right]
++ Function check for block varieties
++ @internal
++*/
++inline
++bool block_is_all_one_range(const bm::word_t* const BMRESTRICT block,
++ unsigned left, unsigned right) BMNOEXCEPT
++{
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::gap_max_bits);
++ if (block)
++ {
++ if (BM_IS_GAP(block))
++ return bm::gap_is_all_one_range(BMGAP_PTR(block), left, right);
++ if (block == FULL_BLOCK_FAKE_ADDR)
++ return true;
++ return bm::bit_block_is_all_one_range(block, left, right);
++ }
++ return false;
++}
+
+-/*! \brief GAP and functor */
+-BMFORCEINLINE unsigned and_op(unsigned v1, unsigned v2)
++/*! @brief Returns "true" if all bits are 1 in the block [left, right]
++ and border bits are 0
++ @internal
++*/
++inline
++bool block_is_interval(const bm::word_t* const BMRESTRICT block,
++ unsigned left, unsigned right) BMNOEXCEPT
+ {
+- return v1 & v2;
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::gap_max_bits-1);
++
++ if (block)
++ {
++ bool is_left, is_right, all_one;
++ if (BM_IS_GAP(block))
++ {
++ const bm::gap_word_t* gap = BMGAP_PTR(block);
++ all_one = bm::gap_is_interval(gap, left, right);
++ return all_one;
++ }
++ else // bit-block
++ {
++ if (block == FULL_BLOCK_FAKE_ADDR)
++ return false;
++ unsigned nword = ((left-1) >> bm::set_word_shift);
++ is_left = block[nword] & (1u << ((left-1) & bm::set_word_mask));
++ if (is_left == false)
++ {
++ nword = ((right + 1) >> bm::set_word_shift);
++ is_right = block[nword] & (1u << ((right + 1) & bm::set_word_mask));
++ if (is_right == false)
++ {
++ all_one = bm::bit_block_is_all_one_range(block, left, right);
++ return all_one;
++ }
++ }
++ }
++ }
++
++ return false;
+ }
+
++// ----------------------------------------------------------------------
+
+-/*! \brief GAP xor functor */
+-BMFORCEINLINE unsigned xor_op(unsigned v1, unsigned v2)
++/**
++ \brief Searches for the last 1 bit in the 111 interval of a BIT block
++ \param block - BIT buffer
++ \param nbit - bit index to start checking from
++ \param pos - [out] found value
++
++ \return false if not found
++ @ingroup bitfunc
++*/
++inline
++bool bit_block_find_interval_end(const bm::word_t* BMRESTRICT block,
++ unsigned nbit, unsigned* BMRESTRICT pos) BMNOEXCEPT
+ {
+- return v1 ^ v2;
++ BM_ASSERT(block);
++ BM_ASSERT(pos);
++
++ unsigned nword = unsigned(nbit >> bm::set_word_shift);
++ unsigned bit_pos = (nbit & bm::set_word_mask);
++ bm::word_t w = block[nword];
++ w &= (1u << bit_pos);
++ if (!w)
++ return false;
++
++ if (nbit == bm::gap_max_bits-1)
++ {
++ *pos = bm::gap_max_bits-1;
++ return true;
++ }
++ *pos = nbit;
++
++ ++nbit;
++ nword = unsigned(nbit >> bm::set_word_shift);
++ bit_pos = (nbit & bm::set_word_mask);
++
++ w = (~block[nword]) >> bit_pos;
++ w <<= bit_pos; // clear the trailing bits
++ if (w)
++ {
++ bit_pos = bm::bit_scan_forward32(w); // trailing zeros
++ *pos = unsigned(bit_pos + (nword * 8u * unsigned(sizeof(bm::word_t)))-1);
++ return true;
++ }
++
++ for (++nword; nword < bm::set_block_size; ++nword)
++ {
++ w = ~block[nword];
++ if (w)
++ {
++ bit_pos = bm::bit_scan_forward32(w); // trailing zeros
++ *pos = unsigned(bit_pos + (nword * 8u * unsigned(sizeof(bm::word_t)))-1);
++ return true;
++ }
++ } // for nword
++
++ // 0 not found, all block is 1s...
++ *pos = bm::gap_max_bits-1;
++ return true;
+ }
+
+
+-/*! \brief GAP or functor */
+-BMFORCEINLINE unsigned or_op(unsigned v1, unsigned v2)
++/*! @brief Find end of the current 111 interval
++ @return search result code 0 - not found, 1 found, 2 - found at the end
++ @internal
++*/
++inline
++unsigned block_find_interval_end(const bm::word_t* BMRESTRICT block,
++ unsigned nbit_from,
++ unsigned* BMRESTRICT found_nbit) BMNOEXCEPT
+ {
+- return v1 | v2;
++ BM_ASSERT(block && found_nbit);
++ BM_ASSERT(nbit_from < bm::gap_max_bits);
++
++ bool b;
++ if (BM_IS_GAP(block))
++ {
++ const bm::gap_word_t* gap = BMGAP_PTR(block);
++ b = bm::gap_find_interval_end(gap, nbit_from, found_nbit);
++ if (b && *found_nbit == bm::gap_max_bits-1)
++ return 2; // end of block, keep searching
++ }
++ else // bit-block
++ {
++ if (IS_FULL_BLOCK(block))
++ {
++ *found_nbit = bm::gap_max_bits-1;
++ return 2;
++ }
++ b = bm::bit_block_find_interval_end(block, nbit_from, found_nbit);
++ if (b && *found_nbit == bm::gap_max_bits-1)
++ return 2; // end of block, keep searching
++ }
++ return b;
+ }
+
+-/*! \brief GAP or functor */
+-BMFORCEINLINE unsigned sub_op(unsigned v1, unsigned v2)
++// ----------------------------------------------------------------------
++
++/**
++ \brief Searches for the first 1 bit in the 111 interval of a BIT block
++ \param block - BIT buffer
++ \param nbit - bit index to start checking from
++ \param pos - [out] found value
++
++ \return false if not found
++ @ingroup bitfunc
++*/
++inline
++bool bit_block_find_interval_start(const bm::word_t* BMRESTRICT block,
++ unsigned nbit, unsigned* BMRESTRICT pos) BMNOEXCEPT
+ {
+- return v1 & ~v2;
++ BM_ASSERT(block);
++ BM_ASSERT(pos);
++
++ unsigned nword = unsigned(nbit >> bm::set_word_shift);
++ unsigned bit_pos = (nbit & bm::set_word_mask);
++ bm::word_t w = block[nword];
++ w &= (1u << bit_pos);
++ if (!w)
++ return false;
++
++ if (nbit == 0)
++ {
++ *pos = 0;
++ return true;
++ }
++ *pos = nbit;
++
++ --nbit;
++ nword = unsigned(nbit >> bm::set_word_shift);
++ bit_pos = (nbit & bm::set_word_mask);
++
++ w = (~block[nword]) & block_set_table<true>::_left[bit_pos];
++ if (w)
++ {
++ bit_pos = bm::bit_scan_reverse32(w);
++ *pos = unsigned(bit_pos + (nword * 8u * unsigned(sizeof(bm::word_t)))+1);
++ return true;
++ }
++
++ if (nword)
++ {
++ for (--nword; true; --nword)
++ {
++ w = ~block[nword];
++ if (w)
++ {
++ bit_pos = bm::bit_scan_reverse32(w); // trailing zeros
++ *pos = unsigned(bit_pos + (nword * 8u * unsigned(sizeof(bm::word_t)))+1);
++ return true;
++ }
++ if (!nword)
++ break;
++ } // for nword
++ }
++
++ // 0 not found, all block is 1s...
++ *pos = 0;
++ return true;
+ }
+
+
++/*! @brief Find start of the current 111 interval
++ @return search result code 0 - not found, 1 found, 2 - found at the start
++ @internal
++*/
++inline
++unsigned block_find_interval_start(const bm::word_t* BMRESTRICT block,
++ unsigned nbit_from,
++ unsigned* BMRESTRICT found_nbit) BMNOEXCEPT
++{
++ BM_ASSERT(block && found_nbit);
++ BM_ASSERT(nbit_from < bm::gap_max_bits);
++ bool b;
++ if (BM_IS_GAP(block))
++ {
++ const bm::gap_word_t* gap = BMGAP_PTR(block);
++ b = bm::gap_find_interval_start(gap, nbit_from, found_nbit);
++ if (b && *found_nbit == 0)
++ return 2; // start of block, keep searching
++ }
++ else // bit-block
++ {
++ if (IS_FULL_BLOCK(block))
++ {
++ *found_nbit = 0;
++ return 2;
++ }
++ b = bm::bit_block_find_interval_start(block, nbit_from, found_nbit);
++ if (b && *found_nbit == 0)
++ return 2; // start of block, keep searching
++ }
++ return b;
++}
++
++// ----------------------------------------------------------------------
++
++/*! @brief Returns "true" if one bit is set in the block [left, right]
++ Function check for block varieties
++ @internal
++*/
++inline
++bool block_any_range(const bm::word_t* const BMRESTRICT block,
++ unsigned left, unsigned right) BMNOEXCEPT
++{
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::gap_max_bits);
++ if (!block)
++ return false;
++ if (BM_IS_GAP(block))
++ return bm::gap_any_range(BMGAP_PTR(block), left, right);
++ if (IS_FULL_BLOCK(block))
++ return true;
++ return bm::bit_block_any_range(block, left, right);
++}
++
++// ----------------------------------------------------------------------
++
++/*! @brief Returns "true" if one bit is set in the block
++ Function check for block varieties
++ @internal
++*/
++inline
++bool block_any(const bm::word_t* const BMRESTRICT block) BMNOEXCEPT
++{
++ if (!block)
++ return false;
++ if (IS_FULL_BLOCK(block))
++ return true;
++ bool all_zero = (BM_IS_GAP(block)) ?
++ bm::gap_is_all_zero(BMGAP_PTR(block))
++ : bm::bit_is_all_zero(block);
++ return !all_zero;
++}
++
++
++
++// ----------------------------------------------------------------------
++
++// GAP blocks manipulation functions:
++
++
+ /*!
+ \brief GAP AND operation.
+
+@@ -5095,13 +5608,14 @@
+
+ @ingroup gapfunc
+ */
+-BMFORCEINLINE
++inline
+ gap_word_t* gap_operation_and(const gap_word_t* BMRESTRICT vect1,
+ const gap_word_t* BMRESTRICT vect2,
+ gap_word_t* BMRESTRICT tmp_buf,
+- unsigned& dsize)
++ unsigned& dsize) BMNOEXCEPT
+ {
+- bm::gap_buff_op(tmp_buf, vect1, 0, vect2, 0, bm::and_op, dsize);
++ bm::gap_buff_op<bm::gap_word_t, bm::and_func>(
++ tmp_buf, vect1, 0, vect2, 0, dsize);
+ return tmp_buf;
+ }
+
+@@ -5119,11 +5633,11 @@
+
+ @ingroup gapfunc
+ */
+-BMFORCEINLINE
++inline
+ unsigned gap_operation_any_and(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2)
++ const gap_word_t* BMRESTRICT vect2) BMNOEXCEPT
+ {
+- return gap_buff_any_op(vect1, 0, vect2, 0, and_op);
++ return gap_buff_any_op<bm::gap_word_t, bm::and_func>(vect1, 0, vect2, 0);
+ }
+
+
+@@ -5138,9 +5652,9 @@
+ */
+ inline
+ unsigned gap_count_and(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2)
++ const gap_word_t* BMRESTRICT vect2) BMNOEXCEPT
+ {
+- return gap_buff_count_op(vect1, vect2, and_op);
++ return bm::gap_buff_count_op<bm::gap_word_t, bm::and_func>(vect1, vect2);
+ }
+
+
+@@ -5165,9 +5679,10 @@
+ gap_word_t* gap_operation_xor(const gap_word_t* BMRESTRICT vect1,
+ const gap_word_t* BMRESTRICT vect2,
+ gap_word_t* BMRESTRICT tmp_buf,
+- unsigned& dsize)
++ unsigned& dsize) BMNOEXCEPT
+ {
+- gap_buff_op(tmp_buf, vect1, 0, vect2, 0, bm::xor_op, dsize);
++ bm::gap_buff_op<bm::gap_word_t, bm::xor_func>(
++ tmp_buf, vect1, 0, vect2, 0, dsize);
+ return tmp_buf;
+ }
+
+@@ -5178,9 +5693,10 @@
+ bool gap_operation_dry_xor(const gap_word_t* BMRESTRICT vect1,
+ const gap_word_t* BMRESTRICT vect2,
+ unsigned& dsize,
+- unsigned limit)
++ unsigned limit) BMNOEXCEPT
+ {
+- return gap_buff_dry_op(vect1, vect2, bm::xor_op, dsize, limit);
++ return
++ bm::gap_buff_dry_op<bm::gap_word_t, bm::xor_func>(vect1, vect2, dsize, limit);
+ }
+
+
+@@ -5200,9 +5716,9 @@
+ */
+ BMFORCEINLINE
+ unsigned gap_operation_any_xor(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2)
++ const gap_word_t* BMRESTRICT vect2) BMNOEXCEPT
+ {
+- return gap_buff_any_op(vect1, 0, vect2, 0, bm::xor_op);
++ return gap_buff_any_op<bm::gap_word_t, bm::xor_func>(vect1, 0, vect2, 0);
+ }
+
+ /*!
+@@ -5214,11 +5730,11 @@
+
+ @ingroup gapfunc
+ */
+-BMFORCEINLINE
++BMFORCEINLINE
+ unsigned gap_count_xor(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2)
++ const gap_word_t* BMRESTRICT vect2) BMNOEXCEPT
+ {
+- return gap_buff_count_op(vect1, vect2, bm::xor_op);
++ return bm::gap_buff_count_op<bm::gap_word_t, bm::xor_func>(vect1, vect2);
+ }
+
+
+@@ -5243,10 +5759,10 @@
+ gap_word_t* gap_operation_or(const gap_word_t* BMRESTRICT vect1,
+ const gap_word_t* BMRESTRICT vect2,
+ gap_word_t* BMRESTRICT tmp_buf,
+- unsigned& dsize)
++ unsigned& dsize) BMNOEXCEPT
+ {
+- gap_buff_op(tmp_buf, vect1, 1, vect2, 1, bm::and_op, dsize);
+- gap_invert(tmp_buf);
++ bm::gap_buff_op<bm::gap_word_t, bm::and_func>(tmp_buf, vect1, 1, vect2, 1, dsize);
++ bm::gap_invert(tmp_buf);
+ return tmp_buf;
+ }
+
+@@ -5261,9 +5777,9 @@
+ */
+ BMFORCEINLINE
+ unsigned gap_count_or(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2)
++ const gap_word_t* BMRESTRICT vect2) BMNOEXCEPT
+ {
+- return gap_buff_count_op(vect1, vect2, bm::or_op);
++ return gap_buff_count_op<bm::gap_word_t, bm::or_func>(vect1, vect2);
+ }
+
+
+@@ -5285,12 +5801,14 @@
+
+ @ingroup gapfunc
+ */
+-inline gap_word_t* gap_operation_sub(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2,
+- gap_word_t* BMRESTRICT tmp_buf,
+- unsigned& dsize)
++inline
++gap_word_t* gap_operation_sub(const gap_word_t* BMRESTRICT vect1,
++ const gap_word_t* BMRESTRICT vect2,
++ gap_word_t* BMRESTRICT tmp_buf,
++ unsigned& dsize) BMNOEXCEPT
+ {
+- gap_buff_op(tmp_buf, vect1, 0, vect2, 1, and_op, dsize);
++ bm::gap_buff_op<bm::gap_word_t, bm::and_func>( // no bug here
++ tmp_buf, vect1, 0, vect2, 1, dsize);
+ return tmp_buf;
+ }
+
+@@ -5309,11 +5827,13 @@
+
+ @ingroup gapfunc
+ */
+-BMFORCEINLINE
++inline
+ unsigned gap_operation_any_sub(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2)
++ const gap_word_t* BMRESTRICT vect2) BMNOEXCEPT
+ {
+- return gap_buff_any_op(vect1, 0, vect2, 1, bm::and_op);
++ return
++ bm::gap_buff_any_op<bm::gap_word_t, bm::and_func>( // no bug here
++ vect1, 0, vect2, 1);
+ }
+
+
+@@ -5328,9 +5848,9 @@
+ */
+ BMFORCEINLINE
+ unsigned gap_count_sub(const gap_word_t* BMRESTRICT vect1,
+- const gap_word_t* BMRESTRICT vect2)
++ const gap_word_t* BMRESTRICT vect2) BMNOEXCEPT
+ {
+- return gap_buff_count_op(vect1, vect2, bm::sub_op);
++ return bm::gap_buff_count_op<bm::gap_word_t, bm::sub_func>(vect1, vect2);
+ }
+
+
+@@ -5348,7 +5868,8 @@
+ @ingroup bitfunc
+ */
+ inline
+-void bit_block_copy(bm::word_t* BMRESTRICT dst, const bm::word_t* BMRESTRICT src)
++void bit_block_copy(bm::word_t* BMRESTRICT dst,
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ VECT_COPY_BLOCK(dst, src);
+@@ -5366,7 +5887,8 @@
+ @ingroup bitfunc
+ */
+ inline
+-void bit_block_stream(bm::word_t* BMRESTRICT dst, const bm::word_t* BMRESTRICT src)
++void bit_block_stream(bm::word_t* BMRESTRICT dst,
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ #ifdef VECT_STREAM_BLOCK
+ VECT_STREAM_BLOCK(dst, src);
+@@ -5388,7 +5910,8 @@
+ @ingroup bitfunc
+ */
+ inline
+-bm::id64_t bit_block_and(bm::word_t* BMRESTRICT dst, const bm::word_t* BMRESTRICT src)
++bm::id64_t bit_block_and(bm::word_t* BMRESTRICT dst,
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ BM_ASSERT(dst);
+ BM_ASSERT(src);
+@@ -5428,7 +5951,7 @@
+ inline
+ bm::id64_t bit_block_and(bm::word_t* BMRESTRICT dst,
+ const bm::word_t* BMRESTRICT src,
+- bm::id64_t digest)
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(dst);
+ BM_ASSERT(src);
+@@ -5486,7 +6009,7 @@
+ const bm::word_t* BMRESTRICT src1,
+ const bm::word_t* BMRESTRICT src2,
+ const bm::word_t* BMRESTRICT src3,
+- bm::id64_t digest)
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(dst);
+ BM_ASSERT(src0 && src1 && src2 && src3);
+@@ -5551,7 +6074,7 @@
+ bm::id64_t bit_block_and_2way(bm::word_t* BMRESTRICT dst,
+ const bm::word_t* BMRESTRICT src1,
+ const bm::word_t* BMRESTRICT src2,
+- bm::id64_t digest)
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(dst);
+ BM_ASSERT(src1 && src2);
+@@ -5612,7 +6135,7 @@
+ */
+ inline
+ unsigned bit_block_and_count(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ unsigned count;
+ const bm::word_t* src1_end = src1 + bm::set_block_size;
+@@ -5661,7 +6184,7 @@
+ */
+ inline
+ unsigned bit_block_and_any(const bm::word_t* src1,
+- const bm::word_t* src2)
++ const bm::word_t* src2) BMNOEXCEPT
+ {
+ unsigned count = 0;
+ const bm::word_t* src1_end = src1 + bm::set_block_size;
+@@ -5691,7 +6214,7 @@
+ */
+ inline
+ unsigned bit_block_xor_count(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ unsigned count;
+ const bm::word_t* BMRESTRICT src1_end = src1 + bm::set_block_size;
+@@ -5740,7 +6263,7 @@
+ */
+ inline
+ unsigned bit_block_xor_any(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ unsigned count = 0;
+ const bm::word_t* BMRESTRICT src1_end = src1 + bm::set_block_size;
+@@ -5767,7 +6290,7 @@
+ */
+ inline
+ unsigned bit_block_sub_count(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ unsigned count;
+ const bm::word_t* BMRESTRICT src1_end = src1 + bm::set_block_size;
+@@ -5815,7 +6338,7 @@
+ */
+ inline
+ unsigned bit_block_sub_any(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ unsigned count = 0;
+ const bm::word_t* BMRESTRICT src1_end = src1 + bm::set_block_size;
+@@ -5844,7 +6367,7 @@
+ */
+ inline
+ unsigned bit_block_or_count(const bm::word_t* src1,
+- const bm::word_t* src2)
++ const bm::word_t* src2) BMNOEXCEPT
+ {
+ unsigned count;
+ const bm::word_t* src1_end = src1 + bm::set_block_size;
+@@ -5892,7 +6415,7 @@
+ */
+ inline
+ unsigned bit_block_or_any(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ unsigned count = 0;
+ const bm::word_t* BMRESTRICT src1_end = src1 + bm::set_block_size;
+@@ -5924,7 +6447,7 @@
+ @ingroup bitfunc
+ */
+ inline bm::word_t* bit_operation_and(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ BM_ASSERT(dst || src);
+
+@@ -5988,7 +6511,7 @@
+ */
+ inline
+ bm::id_t bit_operation_and_count(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (IS_EMPTY_BLOCK(src1) || IS_EMPTY_BLOCK(src2))
+ return 0;
+@@ -6012,7 +6535,7 @@
+ */
+ inline
+ bm::id_t bit_operation_and_any(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (IS_EMPTY_BLOCK(src1) || IS_EMPTY_BLOCK(src2))
+ return 0;
+@@ -6037,7 +6560,7 @@
+ */
+ inline
+ bm::id_t bit_operation_sub_count(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (src1 == src2)
+ return 0;
+@@ -6074,7 +6597,7 @@
+ */
+ inline
+ bm::id_t bit_operation_sub_count_inv(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ return bit_operation_sub_count(src2, src1);
+ }
+@@ -6092,7 +6615,7 @@
+ */
+ inline
+ bm::id_t bit_operation_sub_any(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (IS_EMPTY_BLOCK(src1))
+ return 0;
+@@ -6127,7 +6650,7 @@
+ */
+ inline
+ bm::id_t bit_operation_or_count(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (IS_FULL_BLOCK(src1) || IS_FULL_BLOCK(src2))
+ return bm::gap_max_bits;
+@@ -6164,7 +6687,7 @@
+ */
+ inline
+ bm::id_t bit_operation_or_any(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (IS_EMPTY_BLOCK(src1))
+ {
+@@ -6197,7 +6720,7 @@
+ */
+ inline
+ bool bit_block_or(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ return VECT_OR_BLOCK(dst, src);
+@@ -6235,7 +6758,7 @@
+ inline
+ bool bit_block_or_2way(bm::word_t* BMRESTRICT dst,
+ const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ return VECT_OR_BLOCK_2WAY(dst, src1, src2);
+@@ -6274,7 +6797,7 @@
+ inline
+ bm::id64_t bit_block_xor_2way(bm::word_t* BMRESTRICT dst,
+ const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ return VECT_XOR_BLOCK_2WAY(dst, src1, src2);
+@@ -6315,7 +6838,7 @@
+ inline
+ bool bit_block_or_3way(bm::word_t* BMRESTRICT dst,
+ const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ return VECT_OR_BLOCK_3WAY(dst, src1, src2);
+@@ -6361,7 +6884,7 @@
+ const bm::word_t* BMRESTRICT src1,
+ const bm::word_t* BMRESTRICT src2,
+ const bm::word_t* BMRESTRICT src3,
+- const bm::word_t* BMRESTRICT src4)
++ const bm::word_t* BMRESTRICT src4) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ return VECT_OR_BLOCK_5WAY(dst, src1, src2, src3, src4);
+@@ -6407,7 +6930,7 @@
+ */
+ inline
+ bm::word_t* bit_operation_or(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ BM_ASSERT(dst || src);
+
+@@ -6467,7 +6990,7 @@
+ */
+ inline
+ bm::id64_t bit_block_sub(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ #ifdef BMVECTOPT
+ bm::id64_t acc = VECT_SUB_BLOCK(dst, src);
+@@ -6504,7 +7027,7 @@
+ inline
+ bm::id64_t bit_block_sub(bm::word_t* BMRESTRICT dst,
+ const bm::word_t* BMRESTRICT src,
+- bm::id64_t digest)
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(dst);
+ BM_ASSERT(src);
+@@ -6565,7 +7088,7 @@
+ bm::id64_t bit_block_sub_2way(bm::word_t* BMRESTRICT dst,
+ const bm::word_t* BMRESTRICT src1,
+ const bm::word_t* BMRESTRICT src2,
+- bm::id64_t digest)
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(dst);
+ BM_ASSERT(src1 && src2);
+@@ -6630,7 +7153,7 @@
+ */
+ inline
+ bm::word_t* bit_operation_sub(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ BM_ASSERT(dst || src);
+
+@@ -6688,7 +7211,7 @@
+ */
+ inline
+ bm::id64_t bit_block_xor(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ BM_ASSERT(dst);
+ BM_ASSERT(src);
+@@ -6724,7 +7247,7 @@
+ */
+ inline
+ void bit_andnot_arr_ffmask(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ const bm::word_t* BMRESTRICT src_end = src + bm::set_block_size;
+ #ifdef BMVECTOPT
+@@ -6759,7 +7282,7 @@
+ */
+ inline
+ bm::word_t* bit_operation_xor(bm::word_t* BMRESTRICT dst,
+- const bm::word_t* BMRESTRICT src)
++ const bm::word_t* BMRESTRICT src) BMNOEXCEPT
+ {
+ BM_ASSERT(dst || src);
+ if (src == dst) return 0; // XOR rule
+@@ -6797,7 +7320,7 @@
+ */
+ inline
+ bm::id_t bit_operation_xor_count(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (src1 == src2)
+ return 0;
+@@ -6829,7 +7352,7 @@
+ */
+ inline
+ bm::id_t bit_operation_xor_any(const bm::word_t* BMRESTRICT src1,
+- const bm::word_t* BMRESTRICT src2)
++ const bm::word_t* BMRESTRICT src2) BMNOEXCEPT
+ {
+ if (src1 == src2)
+ return 0;
+@@ -6854,7 +7377,7 @@
+ @ingroup bitfunc
+ */
+ template<class T>
+-unsigned bit_count_nonzero_size(const T* blk, unsigned data_size)
++unsigned bit_count_nonzero_size(const T* blk, unsigned data_size) BMNOEXCEPT
+ {
+ BM_ASSERT(blk && data_size);
+ unsigned count = 0;
+@@ -6914,7 +7437,8 @@
+ @ingroup bitfunc
+ */
+ inline
+-unsigned bit_block_find(const bm::word_t* block, unsigned nbit, unsigned* pos)
++unsigned bit_block_find(const bm::word_t* BMRESTRICT block,
++ unsigned nbit, unsigned* BMRESTRICT pos) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(pos);
+@@ -6934,7 +7458,7 @@
+ if (w)
+ {
+ bit_pos = bm::bit_scan_forward32(w); // trailing zeros
+- *pos = unsigned(bit_pos + (nword * 8u * sizeof(bm::word_t)));
++ *pos = unsigned(bit_pos + (nword * 8u * unsigned(sizeof(bm::word_t))));
+ return 1;
+ }
+
+@@ -6944,7 +7468,7 @@
+ if (w)
+ {
+ bit_pos = bm::bit_scan_forward32(w); // trailing zeros
+- *pos = unsigned(bit_pos + (i * 8u * sizeof(bm::word_t)));
++ *pos = unsigned(bit_pos + (i * 8u * unsigned(sizeof(bm::word_t))));
+ return w;
+ }
+ } // for i
+@@ -6952,6 +7476,8 @@
+ }
+
+
++
++
+ /*!
+ \brief BIT block find the last set bit (backward search)
+
+@@ -6962,7 +7488,8 @@
+ @ingroup bitfunc
+ */
+ inline
+-unsigned bit_find_last(const bm::word_t* block, unsigned* last)
++unsigned bit_find_last(const bm::word_t* BMRESTRICT block,
++ unsigned* BMRESTRICT last) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(last);
+@@ -6975,7 +7502,7 @@
+ if (w)
+ {
+ unsigned idx = bm::bit_scan_reverse(w);
+- *last = unsigned(idx + (i * 8u * sizeof(bm::word_t)));
++ *last = unsigned(idx + (i * 8u * unsigned(sizeof(bm::word_t))));
+ return w;
+ }
+ if (i == 0)
+@@ -6995,7 +7522,8 @@
+ @internal
+ */
+ inline
+-bool bit_find_first(const bm::word_t* block, unsigned* pos)
++bool bit_find_first(const bm::word_t* BMRESTRICT block,
++ unsigned* BMRESTRICT pos) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(pos);
+@@ -7009,7 +7537,7 @@
+ if (w)
+ {
+ unsigned idx = bm::bit_scan_forward32(w); // trailing zeros
+- *pos = unsigned(idx + (i * 8u * sizeof(bm::word_t)));
++ *pos = unsigned(idx + (i * 8u * unsigned(sizeof(bm::word_t))));
+ return w;
+ }
+ } // for i
+@@ -7029,9 +7557,9 @@
+ @ingroup bitfunc
+ */
+ inline
+-unsigned bit_find_first(const bm::word_t* block,
+- unsigned* first,
+- bm::id64_t digest)
++unsigned bit_find_first(const bm::word_t* BMRESTRICT block,
++ unsigned* BMRESTRICT first,
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(first);
+@@ -7047,7 +7575,7 @@
+ if (w)
+ {
+ unsigned idx = bit_scan_forward32(w); // trailing zeros
+- *first = unsigned(idx + (i * 8u * sizeof(bm::word_t)));
++ *first = unsigned(idx + (i * 8u * unsigned(sizeof(bm::word_t))));
+ return w;
+ }
+ } // for i
+@@ -7067,9 +7595,9 @@
+ @ingroup bitfunc
+ */
+ inline
+-bool bit_find_first_if_1(const bm::word_t* block,
+- unsigned* first,
+- bm::id64_t digest)
++bool bit_find_first_if_1(const bm::word_t* BMRESTRICT block,
++ unsigned* BMRESTRICT first,
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(first);
+@@ -7127,7 +7655,7 @@
+ SIZE_TYPE bit_find_rank(const bm::word_t* const block,
+ SIZE_TYPE rank,
+ unsigned nbit_from,
+- unsigned& nbit_pos)
++ unsigned& nbit_pos) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ BM_ASSERT(rank);
+@@ -7206,7 +7734,7 @@
+ SIZE_TYPE block_find_rank(const bm::word_t* const block,
+ SIZE_TYPE rank,
+ unsigned nbit_from,
+- unsigned& nbit_pos)
++ unsigned& nbit_pos) BMNOEXCEPT
+ {
+ if (BM_IS_GAP(block))
+ {
+@@ -7230,7 +7758,7 @@
+ bm::set_representation best_representation(unsigned bit_count,
+ unsigned total_possible_bitcount,
+ unsigned gap_count,
+- unsigned block_size)
++ unsigned block_size) BMNOEXCEPT
+ {
+ unsigned arr_size = unsigned(sizeof(bm::gap_word_t) * bit_count + sizeof(bm::gap_word_t));
+ unsigned gap_size = unsigned(sizeof(bm::gap_word_t) * gap_count + sizeof(bm::gap_word_t));
+@@ -7268,15 +7796,16 @@
+ const unsigned* BMRESTRICT src,
+ bm::id_t bits,
+ unsigned dest_len,
+- unsigned mask = 0)
++ unsigned mask = 0) BMNOEXCEPT
+ {
+ T* BMRESTRICT pcurr = dest;
+- for (unsigned bit_idx=0; bit_idx < bits; ++src,bit_idx += unsigned(sizeof(*src) * 8))
++ for (unsigned bit_idx=0; bit_idx < bits;
++ ++src,bit_idx += unsigned(sizeof(*src) * 8))
+ {
+ unsigned val = *src ^ mask; // invert value by XOR 0xFF..
+ if (val == 0)
+ continue;
+- if (pcurr + sizeof(val)*8 >= dest + dest_len) // insufficient space
++ if (pcurr + unsigned(sizeof(val)*8) >= dest + dest_len) // insufficient space
+ return 0;
+ // popscan loop to decode bits in a word
+ while (val)
+@@ -7299,7 +7828,7 @@
+ @internal
+ */
+ inline
+-bool check_block_zero(const bm::word_t* blk, bool deep_scan)
++bool check_block_zero(const bm::word_t* blk, bool deep_scan) BMNOEXCEPT
+ {
+ if (!blk) return true;
+ if (IS_FULL_BLOCK(blk)) return false;
+@@ -7323,7 +7852,7 @@
+ @internal
+ */
+ inline
+-bool check_block_one(const bm::word_t* blk, bool deep_scan)
++bool check_block_one(const bm::word_t* blk, bool deep_scan) BMNOEXCEPT
+ {
+ if (blk == 0) return false;
+
+@@ -7348,7 +7877,7 @@
+ template<typename T>
+ unsigned gap_overhead(const T* length,
+ const T* length_end,
+- const T* glevel_len)
++ const T* glevel_len) BMNOEXCEPT
+ {
+ BM_ASSERT(length && length_end && glevel_len);
+
+@@ -7375,7 +7904,7 @@
+ template<typename T>
+ bool improve_gap_levels(const T* length,
+ const T* length_end,
+- T* glevel_len)
++ T* glevel_len) BMNOEXCEPT
+ {
+ BM_ASSERT(length && length_end && glevel_len);
+
+@@ -7473,7 +8002,7 @@
+ inline
+ bool block_find_first_diff(const bm::word_t* BMRESTRICT blk,
+ const bm::word_t* BMRESTRICT arg_blk,
+- unsigned* BMRESTRICT pos)
++ unsigned* BMRESTRICT pos) BMNOEXCEPT
+ {
+ // If one block is zero we check if the other one has at least
+ // one bit ON
+@@ -7568,7 +8097,7 @@
+ bitblock_get_adapter(const bm::word_t* bit_block) : b_(bit_block) {}
+
+ BMFORCEINLINE
+- bm::word_t get_32() { return *b_++; }
++ bm::word_t get_32() BMNOEXCEPT { return *b_++; }
+ private:
+ const bm::word_t* b_;
+ };
+@@ -7597,9 +8126,9 @@
+ public:
+ bitblock_sum_adapter() : sum_(0) {}
+ BMFORCEINLINE
+- void push_back(bm::word_t w) { this->sum_+= w; }
++ void push_back(bm::word_t w) BMNOEXCEPT { this->sum_+= w; }
+ /// Get accumulated sum
+- bm::word_t sum() const { return this->sum_; }
++ bm::word_t sum() const BMNOEXCEPT { return this->sum_; }
+ private:
+ bm::word_t sum_;
+ };
+@@ -7619,7 +8148,7 @@
+ cnt_(0)
+ {}
+
+- bm::word_t get_32()
++ bm::word_t get_32() BMNOEXCEPT
+ {
+ if (cnt_ < from_ || cnt_ > to_)
+ {
+@@ -7645,7 +8174,7 @@
+ void bit_recomb(It1& it1, It2& it2,
+ BinaryOp& op,
+ Encoder& enc,
+- unsigned block_size = bm::set_block_size)
++ unsigned block_size = bm::set_block_size) BMNOEXCEPT
+ {
+ for (unsigned i = 0; i < block_size; ++i)
+ {
+@@ -7659,37 +8188,37 @@
+ /// Bit AND functor
+ template<typename W> struct bit_AND
+ {
+- W operator()(W w1, W w2) { return w1 & w2; }
++ W operator()(W w1, W w2) BMNOEXCEPT { return w1 & w2; }
+ };
+
+ /// Bit OR functor
+ template<typename W> struct bit_OR
+ {
+- W operator()(W w1, W w2) { return w1 | w2; }
++ W operator()(W w1, W w2) BMNOEXCEPT { return w1 | w2; }
+ };
+
+ /// Bit SUB functor
+ template<typename W> struct bit_SUB
+ {
+- W operator()(W w1, W w2) { return w1 & ~w2; }
++ W operator()(W w1, W w2) BMNOEXCEPT { return w1 & ~w2; }
+ };
+
+ /// Bit XOR functor
+ template<typename W> struct bit_XOR
+ {
+- W operator()(W w1, W w2) { return w1 ^ w2; }
++ W operator()(W w1, W w2) BMNOEXCEPT { return w1 ^ w2; }
+ };
+
+ /// Bit ASSIGN functor
+ template<typename W> struct bit_ASSIGN
+ {
+- W operator()(W, W w2) { return w2; }
++ W operator()(W, W w2) BMNOEXCEPT { return w2; }
+ };
+
+ /// Bit COUNT functor
+ template<typename W> struct bit_COUNT
+ {
+- W operator()(W w1, W w2)
++ W operator()(W w1, W w2) BMNOEXCEPT
+ {
+ w1 = 0;
+ BM_INCWORD_BITCOUNT(w1, w2);
+@@ -7700,7 +8229,7 @@
+ /// Bit COUNT AND functor
+ template<typename W> struct bit_COUNT_AND
+ {
+- W operator()(W w1, W w2)
++ W operator()(W w1, W w2) BMNOEXCEPT
+ {
+ W r = 0;
+ BM_INCWORD_BITCOUNT(r, w1 & w2);
+@@ -7711,7 +8240,7 @@
+ /// Bit COUNT XOR functor
+ template<typename W> struct bit_COUNT_XOR
+ {
+- W operator()(W w1, W w2)
++ W operator()(W w1, W w2) BMNOEXCEPT
+ {
+ W r = 0;
+ BM_INCWORD_BITCOUNT(r, w1 ^ w2);
+@@ -7722,7 +8251,7 @@
+ /// Bit COUNT OR functor
+ template<typename W> struct bit_COUNT_OR
+ {
+- W operator()(W w1, W w2)
++ W operator()(W w1, W w2) BMNOEXCEPT
+ {
+ W r = 0;
+ BM_INCWORD_BITCOUNT(r, w1 | w2);
+@@ -7734,7 +8263,7 @@
+ /// Bit COUNT SUB AB functor
+ template<typename W> struct bit_COUNT_SUB_AB
+ {
+- W operator()(W w1, W w2)
++ W operator()(W w1, W w2) BMNOEXCEPT
+ {
+ W r = 0;
+ BM_INCWORD_BITCOUNT(r, w1 & (~w2));
+@@ -7745,7 +8274,7 @@
+ /// Bit SUB BA functor
+ template<typename W> struct bit_COUNT_SUB_BA
+ {
+- W operator()(W w1, W w2)
++ W operator()(W w1, W w2) BMNOEXCEPT
+ {
+ W r = 0;
+ BM_INCWORD_BITCOUNT(r, w2 & (~w1));
+@@ -7756,7 +8285,7 @@
+ /// Bit COUNT A functor
+ template<typename W> struct bit_COUNT_A
+ {
+- W operator()(W w1, W )
++ W operator()(W w1, W ) BMNOEXCEPT
+ {
+ W r = 0;
+ BM_INCWORD_BITCOUNT(r, w1);
+@@ -7767,7 +8296,7 @@
+ /// Bit COUNT B functor
+ template<typename W> struct bit_COUNT_B
+ {
+- W operator()(W, W w2)
++ W operator()(W, W w2) BMNOEXCEPT
+ {
+ W r = 0;
+ BM_INCWORD_BITCOUNT(r, w2);
+@@ -7858,8 +8387,11 @@
+ 0, // set_COUNT_B
+ };
+
+-
+-const unsigned short set_bitscan_wave_size = 2;
++/**
++ Size of bit decode wave in words
++ @internal
++ */
++const unsigned short set_bitscan_wave_size = 4;
+ /*!
+ \brief Unpacks word wave (Nx 32-bit words)
+ \param w_ptr - pointer on wave start
+@@ -7870,7 +8402,9 @@
+ @internal
+ */
+ inline
+-unsigned short bitscan_wave(const bm::word_t* w_ptr, unsigned char* bits)
++unsigned short
++bitscan_wave(const bm::word_t* BMRESTRICT w_ptr,
++ unsigned char* BMRESTRICT bits) BMNOEXCEPT
+ {
+ bm::word_t w0, w1;
+ unsigned short cnt0;
+@@ -7882,12 +8416,20 @@
+ // combine into 64-bit word and scan (when HW popcnt64 is available)
+ bm::id64_t w = (bm::id64_t(w1) << 32) | w0;
+ cnt0 = (unsigned short) bm::bitscan_popcnt64(w, bits);
++
++ w0 = w_ptr[2];
++ w1 = w_ptr[3];
++ w = (bm::id64_t(w1) << 32) | w0;
++ cnt0 += bm::bitscan_popcnt64(w, bits + cnt0, 64);
+ #else
+- unsigned short cnt1;
+ // decode wave as two 32-bit bitscan decodes
+- cnt0 = w0 ? bm::bitscan_popcnt(w0, bits) : 0;
+- cnt1 = w1 ? bm::bitscan_popcnt(w1, bits + cnt0, 32) : 0;
+- cnt0 = (unsigned short)(cnt0 + cnt1);
++ cnt0 = bm::bitscan_popcnt(w0, bits);
++ cnt0 += bm::bitscan_popcnt(w1, bits + cnt0, 32);
++
++ w0 = w_ptr[2];
++ w1 = w_ptr[3];
++ cnt0 += bm::bitscan_popcnt(w0, bits + cnt0, 64);
++ cnt0 += bm::bitscan_popcnt(w1, bits + cnt0, 64+32);
+ #endif
+ return cnt0;
+ }
+@@ -7899,9 +8441,11 @@
+ @internal
+ */
+ inline
+-void bit_block_gather_scatter(unsigned* arr, const bm::word_t* blk,
+- const unsigned* idx, unsigned size, unsigned start,
+- unsigned bit_idx)
++void bit_block_gather_scatter(unsigned* BMRESTRICT arr,
++ const bm::word_t* BMRESTRICT blk,
++ const unsigned* BMRESTRICT idx,
++ unsigned size, unsigned start,
++ unsigned bit_idx) BMNOEXCEPT
+ {
+ typedef unsigned TRGW;
+ typedef unsigned IDX;
+@@ -7930,8 +8474,10 @@
+ @internal
+ */
+ template<typename TRGW, typename IDX, typename SZ>
+-void bit_block_gather_scatter(TRGW* arr, const bm::word_t* blk,
+- const IDX* idx, SZ size, SZ start, unsigned bit_idx)
++void bit_block_gather_scatter(TRGW* BMRESTRICT arr,
++ const bm::word_t* BMRESTRICT blk,
++ const IDX* BMRESTRICT idx,
++ SZ size, SZ start, unsigned bit_idx) BMNOEXCEPT
+ {
+ // TODO: SIMD for 64-bit index sizes and 64-bit target value size
+ //
+@@ -7943,15 +8489,17 @@
+ {
+ const SZ base = start + k;
+ const unsigned nbitA = unsigned(idx[base] & bm::set_block_mask);
+- arr[base] |= (TRGW(bool(blk[nbitA >> bm::set_word_shift] & (mask1 << (nbitA & bm::set_word_mask)))) << bit_idx);
++ arr[base] |= (TRGW(bool(blk[nbitA >> bm::set_word_shift] &
++ (mask1 << (nbitA & bm::set_word_mask)))) << bit_idx);
+ const unsigned nbitB = unsigned(idx[base + 1] & bm::set_block_mask);
+- arr[base+1] |= (TRGW(bool(blk[nbitB >> bm::set_word_shift] & (mask1 << (nbitB & bm::set_word_mask)))) << bit_idx);
++ arr[base+1] |= (TRGW(bool(blk[nbitB >> bm::set_word_shift] &
++ (mask1 << (nbitB & bm::set_word_mask)))) << bit_idx);
+ } // for k
+-
+ for (; k < len; ++k)
+ {
+ unsigned nbit = unsigned(idx[start + k] & bm::set_block_mask);
+- arr[start + k] |= (TRGW(bool(blk[nbit >> bm::set_word_shift] & (mask1 << (nbit & bm::set_word_mask)))) << bit_idx);
++ arr[start + k] |= (TRGW(bool(blk[nbit >> bm::set_word_shift] &
++ (mask1 << (nbit & bm::set_word_mask)))) << bit_idx);
+ } // for k
+ }
+
+@@ -7968,7 +8516,8 @@
+ @internal
+ */
+ inline
+-bm::id64_t idx_arr_block_lookup_u64(const bm::id64_t* idx, bm::id64_t size, bm::id64_t nb, bm::id64_t start)
++bm::id64_t idx_arr_block_lookup_u64(const bm::id64_t* idx,
++ bm::id64_t size, bm::id64_t nb, bm::id64_t start) BMNOEXCEPT
+ {
+ BM_ASSERT(idx);
+ BM_ASSERT(start < size);
+@@ -7993,7 +8542,8 @@
+ @internal
+ */
+ inline
+-unsigned idx_arr_block_lookup_u32(const unsigned* idx, unsigned size, unsigned nb, unsigned start)
++unsigned idx_arr_block_lookup_u32(const unsigned* idx,
++ unsigned size, unsigned nb, unsigned start) BMNOEXCEPT
+ {
+ BM_ASSERT(idx);
+ BM_ASSERT(start < size);
+@@ -8027,7 +8577,7 @@
+ inline
+ void set_block_bits_u64(bm::word_t* BMRESTRICT block,
+ const bm::id64_t* BMRESTRICT idx,
+- bm::id64_t start, bm::id64_t stop)
++ bm::id64_t start, bm::id64_t stop) BMNOEXCEPT
+ {
+ // TODO: SIMD for 64-bit mode
+ for (bm::id64_t i = start; i < stop; ++i)
+@@ -8036,8 +8586,7 @@
+ unsigned nbit = unsigned(n & bm::set_block_mask);
+ unsigned nword = nbit >> bm::set_word_shift;
+ nbit &= bm::set_word_mask;
+- bm::word_t mask = (1u << nbit);
+- block[nword] |= mask;
++ block[nword] |= (1u << nbit);
+ } // for i
+ }
+
+@@ -8058,7 +8607,7 @@
+ inline
+ void set_block_bits_u32(bm::word_t* BMRESTRICT block,
+ const unsigned* BMRESTRICT idx,
+- unsigned start, unsigned stop )
++ unsigned start, unsigned stop ) BMNOEXCEPT
+ {
+ #if defined(VECT_SET_BLOCK_BITS)
+ VECT_SET_BLOCK_BITS(block, idx, start, stop);
+@@ -8069,8 +8618,7 @@
+ unsigned nbit = unsigned(n & bm::set_block_mask);
+ unsigned nword = nbit >> bm::set_word_shift;
+ nbit &= bm::set_word_mask;
+- bm::word_t mask = (1u << nbit);
+- block[nword] |= mask;
++ block[nword] |= (1u << nbit);
+ } // for i
+ #endif
+ }
+@@ -8084,7 +8632,8 @@
+ @internal
+ */
+ inline
+-bool block_ptr_array_range(bm::word_t** arr, unsigned& left, unsigned& right)
++bool block_ptr_array_range(bm::word_t** arr,
++ unsigned& left, unsigned& right) BMNOEXCEPT
+ {
+ BM_ASSERT(arr);
+
+@@ -8119,7 +8668,7 @@
+ */
+ inline
+ unsigned lower_bound_linear_u32(const unsigned* arr, unsigned target,
+- unsigned from, unsigned to)
++ unsigned from, unsigned to) BMNOEXCEPT
+ {
+ BM_ASSERT(arr);
+ BM_ASSERT(from <= to);
+@@ -8141,8 +8690,9 @@
+ @internal
+ */
+ inline
+-unsigned lower_bound_linear_u64(const unsigned long long* arr, unsigned long long target,
+- unsigned from, unsigned to)
++unsigned lower_bound_linear_u64(const unsigned long long* arr,
++ unsigned long long target,
++ unsigned from, unsigned to) BMNOEXCEPT
+ {
+ BM_ASSERT(arr);
+ BM_ASSERT(from <= to);
+@@ -8166,7 +8716,7 @@
+ */
+ inline
+ unsigned lower_bound_u32(const unsigned* arr, unsigned target,
+- unsigned from, unsigned to)
++ unsigned from, unsigned to) BMNOEXCEPT
+ {
+ BM_ASSERT(arr);
+ BM_ASSERT(from <= to);
+@@ -8200,8 +8750,9 @@
+ @internal
+ */
+ inline
+-unsigned lower_bound_u64(const unsigned long long* arr, unsigned long long target,
+- unsigned from, unsigned to)
++unsigned lower_bound_u64(const unsigned long long* arr,
++ unsigned long long target,
++ unsigned from, unsigned to) BMNOEXCEPT
+ {
+ BM_ASSERT(arr);
+ BM_ASSERT(from <= to);
+@@ -8238,7 +8789,8 @@
+ */
+ #ifdef BM64ADDR
+ inline
+-bm::id64_t block_to_global_index(unsigned i, unsigned j, unsigned block_idx)
++bm::id64_t block_to_global_index(unsigned i, unsigned j,
++ unsigned block_idx) BMNOEXCEPT
+ {
+ bm::id64_t base_idx = bm::id64_t(i) * bm::set_sub_array_size * bm::gap_max_bits;
+ base_idx += j * bm::gap_max_bits;
+@@ -8246,7 +8798,8 @@
+ }
+ #else
+ inline
+-bm::id_t block_to_global_index(unsigned i, unsigned j, unsigned block_idx)
++bm::id_t block_to_global_index(unsigned i, unsigned j,
++ unsigned block_idx) BMNOEXCEPT
+ {
+ unsigned base_idx = i * bm::set_sub_array_size * bm::gap_max_bits;
+ base_idx += j * bm::gap_max_bits;
+@@ -8274,7 +8827,7 @@
+ @internal
+ */
+ inline
+-bm::id64_t ptrp_test(ptr_payload_t ptr, bm::gap_word_t v)
++bm::id64_t ptrp_test(ptr_payload_t ptr, bm::gap_word_t v) BMNOEXCEPT
+ {
+ if (v == 0)
+ {
+Index: c++/include/util/bitset/bmdbg.h
+===================================================================
+--- a/c++/include/util/bitset/bmdbg.h (revision 90103)
++++ b/c++/include/util/bitset/bmdbg.h (revision 90104)
+@@ -510,15 +510,15 @@
+ }
+
+ template<class BV>
+-unsigned compute_serialization_size(const BV& bv)
++size_t compute_serialization_size(const BV& bv)
+ {
+ BM_DECLARE_TEMP_BLOCK(tb)
+ unsigned char* buf = 0;
+- unsigned blob_size = 0;
++ typename BV::size_type blob_size = 0;
+ try
+ {
+ bm::serializer<BV> bvs(typename BV::allocator_type(), tb);
+- bvs.set_compression_level(4);
++ //bvs.set_compression_level(4);
+
+ typename BV::statistics st;
+ bv.calc_stat(&st);
+@@ -677,12 +677,12 @@
+ const typename SV::bvector_type* bv1 = sim_vec[k].get_first();
+ const typename SV::bvector_type* bv2 = sim_vec[k].get_second();
+
+- unsigned bv_size2 = compute_serialization_size(*bv2);
++ auto bv_size2 = compute_serialization_size(*bv2);
+
+ typename SV::bvector_type bvx(*bv2);
+ bvx ^= *bv1;
+
+- unsigned bv_size_x = compute_serialization_size(bvx);
++ auto bv_size_x = compute_serialization_size(bvx);
+ if (bv_size_x < bv_size2) // true savings
+ {
+ size_t diff = bv_size2 - bv_size_x;
+@@ -904,10 +904,15 @@
+ BM_ASSERT(!fname.empty());
+
+ bm::sparse_vector_serial_layout<SV> sv_lay;
+-
++
++ bm::sparse_vector_serializer<SV> sv_serializer;
++ sv_serializer.set_xor_ref(true);
++
++ sv_serializer.serialize(sv, sv_lay);
++/*
+ BM_DECLARE_TEMP_BLOCK(tb)
+ bm::sparse_vector_serialize(sv, sv_lay, tb);
+-
++*/
+ std::ofstream fout(fname.c_str(), std::ios::binary);
+ if (!fout.good())
+ {
+@@ -956,7 +961,7 @@
+ }
+
+
+-// comapre-check if sparse vector is excatly coresponds to vector
++// compare-check if sparse vector is excatly coresponds to vector
+ //
+ // returns 0 - if equal
+ // 1 - no size match
+@@ -979,6 +984,20 @@
+ }
+
+
++template<class SV, class BV>
++void convert_bv2sv(SV& sv, const BV& bv)
++{
++ typename SV::back_insert_iterator bit = sv.get_back_inserter();
++ typename BV::enumerator en = bv.first();
++ for (; en.valid(); ++en)
++ {
++ auto v = en.value();
++ bit = v;
++ }
++ bit.flush();
++}
++
++
+ } // namespace
+
+ #include "bmundef.h"
+Index: c++/include/util/bitset/bmundef.h
+===================================================================
+--- a/c++/include/util/bitset/bmundef.h (revision 90103)
++++ b/c++/include/util/bitset/bmundef.h (revision 90104)
+@@ -72,7 +72,10 @@
+
+ #undef VECT_ARR_BLOCK_LOOKUP
+ #undef VECT_SET_BLOCK_BITS
++
+ #undef VECT_BLOCK_CHANGE
++#undef VECT_BLOCK_CHANGE_BC
++
+ #undef VECT_BIT_TO_GAP
+
+ #undef VECT_AND_DIGEST
+@@ -80,7 +83,12 @@
+ #undef VECT_AND_DIGEST_5WAY
+ #undef VECT_BLOCK_SET_DIGEST
+
++#undef VECT_BLOCK_XOR_CHANGE
++#undef VECT_BIT_BLOCK_XOR
++
++#undef VECT_BIT_FIND_FIRST
+ #undef VECT_BIT_FIND_DIFF
++#undef VECT_GAP_BFIND
+
+ #undef BMI1_SELECT64
+ #undef BMI2_SELECT64
+Index: c++/include/util/bitset/bmalgo_impl.h
+===================================================================
+--- a/c++/include/util/bitset/bmalgo_impl.h (revision 90103)
++++ b/c++/include/util/bitset/bmalgo_impl.h (revision 90104)
+@@ -70,7 +70,7 @@
+ \ingroup distance
+ */
+ inline
+-distance_metric operation2metric(set_operation op)
++distance_metric operation2metric(set_operation op) BMNOEXCEPT
+ {
+ BM_ASSERT(is_const_set_operation(op));
+ if (op == set_COUNT) op = set_COUNT_B;
+@@ -95,11 +95,11 @@
+ distance_metric metric;
+ size_type result;
+
+- distance_metric_descriptor(distance_metric m)
++ distance_metric_descriptor(distance_metric m) BMNOEXCEPT
+ : metric(m),
+ result(0)
+ {}
+- distance_metric_descriptor()
++ distance_metric_descriptor() BMNOEXCEPT
+ : metric(bm::COUNT_XOR),
+ result(0)
+ {}
+@@ -107,7 +107,7 @@
+ /*!
+ \brief Sets metric result to 0
+ */
+- void reset()
++ void reset() BMNOEXCEPT
+ {
+ result = 0;
+ }
+@@ -125,7 +125,7 @@
+ void combine_count_operation_with_block(const bm::word_t* blk,
+ const bm::word_t* arg_blk,
+ distance_metric_descriptor* dmit,
+- distance_metric_descriptor* dmit_end)
++ distance_metric_descriptor* dmit_end) BMNOEXCEPT
+
+ {
+ gap_word_t* g1 = BMGAP_PTR(blk);
+@@ -340,7 +340,7 @@
+ */
+ inline
+ unsigned combine_count_and_operation_with_block(const bm::word_t* blk,
+- const bm::word_t* arg_blk)
++ const bm::word_t* arg_blk) BMNOEXCEPT
+ {
+ unsigned gap = BM_IS_GAP(blk);
+ unsigned arg_gap = BM_IS_GAP(arg_blk);
+@@ -381,7 +381,7 @@
+ const bm::word_t* arg_blk,
+ unsigned arg_gap,
+ distance_metric_descriptor* dmit,
+- distance_metric_descriptor* dmit_end)
++ distance_metric_descriptor* dmit_end) BMNOEXCEPT
+
+ {
+ gap_word_t* res=0;
+@@ -628,7 +628,7 @@
+ unsigned
+ combine_count_operation_with_block(const bm::word_t* blk,
+ const bm::word_t* arg_blk,
+- distance_metric metric)
++ distance_metric metric) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(metric);
+ combine_count_operation_with_block(blk, //gap,
+@@ -649,7 +649,7 @@
+ unsigned gap,
+ const bm::word_t* arg_blk,
+ unsigned arg_gap,
+- distance_metric metric)
++ distance_metric metric) BMNOEXCEPT
+ {
+ distance_metric_descriptor dmd(metric);
+ combine_any_operation_with_block(blk, gap,
+@@ -668,7 +668,7 @@
+ inline
+ void distance_stage(const distance_metric_descriptor* dmit,
+ const distance_metric_descriptor* dmit_end,
+- bool* is_all_and)
++ bool* is_all_and) BMNOEXCEPT
+ {
+ for (const distance_metric_descriptor* it = dmit; it < dmit_end; ++it)
+ {
+@@ -702,7 +702,7 @@
+ void distance_operation(const BV& bv1,
+ const BV& bv2,
+ distance_metric_descriptor* dmit,
+- distance_metric_descriptor* dmit_end)
++ distance_metric_descriptor* dmit_end) BMNOEXCEPT
+ {
+ const typename BV::blocks_manager_type& bman1 = bv1.get_blocks_manager();
+ const typename BV::blocks_manager_type& bman2 = bv2.get_blocks_manager();
+@@ -787,7 +787,7 @@
+ */
+ template<class BV>
+ typename BV::size_type distance_and_operation(const BV& bv1,
+- const BV& bv2)
++ const BV& bv2) BMNOEXCEPT
+ {
+ const typename BV::blocks_manager_type& bman1 = bv1.get_blocks_manager();
+ const typename BV::blocks_manager_type& bman2 = bv2.get_blocks_manager();
+@@ -858,7 +858,7 @@
+ void distance_operation_any(const BV& bv1,
+ const BV& bv2,
+ distance_metric_descriptor* dmit,
+- distance_metric_descriptor* dmit_end)
++ distance_metric_descriptor* dmit_end) BMNOEXCEPT
+ {
+ const typename BV::blocks_manager_type& bman1 = bv1.get_blocks_manager();
+ const typename BV::blocks_manager_type& bman2 = bv2.get_blocks_manager();
+@@ -980,7 +980,8 @@
+ \internal
+ */
+ template<typename It, typename SIZE_TYPE>
+-It block_range_scan(It first, It last, SIZE_TYPE nblock, SIZE_TYPE* max_id)
++It block_range_scan(It first, It last,
++ SIZE_TYPE nblock, SIZE_TYPE* max_id) BMNOEXCEPT
+ {
+ SIZE_TYPE m = *max_id;
+ It right;
+@@ -1333,7 +1334,11 @@
+ typename BV::blocks_manager_type::block_idx_type st = 0;
+ bm::for_each_block(blk_root, bman.top_block_size(), func, st);
+
+- return func.count();
++ typename BV::size_type intervals = func.count();
++ bool last_bit_set = bv.test(bm::id_max-1);
++
++ intervals -= last_bit_set; // correct last (out of range) interval
++ return intervals;
+ }
+
+ /*!
+@@ -1514,7 +1519,7 @@
+
+
+ /*!
+- \brief for-each visitor, calls a special visitor functor for each 1 bit group
++ \brief for-each visitor, calls a visitor functor for each 1 bit group
+
+ \param block - bit block buffer pointer
+ \param offset - global block offset (number of bits)
+@@ -1527,6 +1532,7 @@
+ void for_each_bit_blk(const bm::word_t* block, SIZE_TYPE offset,
+ Func& bit_functor)
+ {
++ BM_ASSERT(block);
+ if (IS_FULL_BLOCK(block))
+ {
+ bit_functor.add_range(offset, bm::gap_max_bits);
+@@ -1547,7 +1553,111 @@
+ } while (block < block_end);
+ }
+
++/*!
++ \brief for-each range visitor, calls a visitor functor for each 1 bit group
+
++ \param block - bit block buffer pointer
++ \param offset - global block offset (number of bits)
++ \param left - bit addredd in block from [from..to]
++ \param right - bit addredd in block to [from..to]
++ \param bit_functor - functor must support .add_bits(offset, bits_ptr, size)
++
++ @ingroup bitfunc
++ @internal
++*/
++template<typename Func, typename SIZE_TYPE>
++void for_each_bit_blk(const bm::word_t* block, SIZE_TYPE offset,
++ unsigned left, unsigned right,
++ Func& bit_functor)
++{
++ BM_ASSERT(block);
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::bits_in_block);
++
++ if (IS_FULL_BLOCK(block))
++ {
++ unsigned sz = right - left + 1;
++ bit_functor.add_range(offset + left, sz);
++ return;
++ }
++ unsigned char bits[bm::set_bitscan_wave_size*32];
++
++ unsigned cnt, nword, nbit, bitcount, temp;
++ nbit = left & bm::set_word_mask;
++ const bm::word_t* word =
++ block + (nword = unsigned(left >> bm::set_word_shift));
++ if (left == right) // special case (only 1 bit to check)
++ {
++ if ((*word >> nbit) & 1u)
++ {
++ bits[0] = (unsigned char)nbit;
++ bit_functor.add_bits(offset + (nword * 32), bits, 1);
++ }
++ return;
++ }
++
++ bitcount = right - left + 1u;
++ if (nbit) // starting position is not aligned
++ {
++ unsigned right_margin = nbit + right - left;
++ if (right_margin < 32)
++ {
++ unsigned mask =
++ block_set_table<true>::_right[nbit] &
++ block_set_table<true>::_left[right_margin];
++ temp = (*word & mask);
++ cnt = bm::bitscan_popcnt(temp, bits);
++ if (cnt)
++ bit_functor.add_bits(offset + (nword * 32), bits, cnt);
++
++ return;
++ }
++ temp = *word & block_set_table<true>::_right[nbit];
++ cnt = bm::bitscan_popcnt(temp, bits);
++ if (cnt)
++ bit_functor.add_bits(offset + (nword * 32), bits, cnt);
++ bitcount -= 32 - nbit;
++ ++word; ++nword;
++ }
++ else
++ {
++ bitcount = right - left + 1u;
++ }
++ BM_ASSERT(bm::set_bitscan_wave_size == 4);
++ // now when we are word aligned, we can scan the bit-stream
++ // loop unrolled to evaluate 4 words at a time
++ for ( ;bitcount >= 128;
++ bitcount-=128, word+=bm::set_bitscan_wave_size,
++ nword += bm::set_bitscan_wave_size)
++ {
++ cnt = bm::bitscan_wave(word, bits);
++ if (cnt)
++ bit_functor.add_bits(offset + (nword * 32), bits, cnt);
++ } // for
++
++ for ( ;bitcount >= 32; bitcount-=32, ++word)
++ {
++ temp = *word;
++ cnt = bm::bitscan_popcnt(temp, bits);
++ if (cnt)
++ bit_functor.add_bits(offset + (nword * 32), bits, cnt);
++ ++nword;
++ } // for
++
++ BM_ASSERT(bitcount < 32);
++
++ if (bitcount) // we have a tail to count
++ {
++ temp = *word & block_set_table<true>::_left[bitcount-1];
++ cnt = bm::bitscan_popcnt(temp, bits);
++ if (cnt)
++ bit_functor.add_bits(offset + (nword * 32), bits, cnt);
++ }
++
++}
++
++
++
+ /*!
+ \brief for-each visitor, calls a special visitor functor for each 1 bit range
+
+@@ -1577,7 +1687,224 @@
+ }
+ }
+
++/*!
++ \brief for-each visitor, calls a special visitor functor for each 1 bit range
+
++ \param buf - bit block buffer pointer
++ \param offset - global block offset (number of bits)
++ \param left - interval start [left..right]
++ \param right - intreval end [left..right]
++ \param bit_functor - functor must support .add_range(offset, bits_ptr, size)
++
++ @ingroup gapfunc
++ @internal
++*/
++template<typename T, typename Func, typename SIZE_TYPE>
++void for_each_gap_blk_range(const T* BMRESTRICT buf,
++ SIZE_TYPE offset,
++ unsigned left, unsigned right,
++ Func& bit_functor)
++{
++ BM_ASSERT(left <= right);
++ BM_ASSERT(right < bm::bits_in_block);
++
++ unsigned is_set;
++ unsigned start_pos = bm::gap_bfind(buf, left, &is_set);
++ const T* BMRESTRICT pcurr = buf + start_pos;
++
++ if (is_set)
++ {
++ if (right <= *pcurr)
++ {
++ bit_functor.add_range(offset + left, (right + 1)-left);
++ return;
++ }
++ bit_functor.add_range(offset + left, (*pcurr + 1)-left);
++ ++pcurr;
++ }
++
++ const T* BMRESTRICT pend = buf + (*buf >> 3);
++ for (++pcurr; pcurr <= pend; pcurr += 2)
++ {
++ T prev = *(pcurr-1);
++ if (right <= *pcurr)
++ {
++ int sz = int(right) - int(prev);
++ if (sz > 0)
++ bit_functor.add_range(offset + prev + 1, unsigned(sz));
++ return;
++ }
++ bit_functor.add_range(offset + prev + 1, *pcurr - prev);
++ } // for
++}
++
++
++
++/*! For each non-zero block in [from, to] executes supplied functor
++ \internal
++*/
++template<typename T, typename N, typename F>
++void for_each_bit_block_range(T*** root,
++ N top_size, N nb_from, N nb_to, F& f)
++{
++ BM_ASSERT(top_size);
++ if (nb_from > nb_to)
++ return;
++ unsigned i_from = unsigned(nb_from >> bm::set_array_shift);
++ unsigned j_from = unsigned(nb_from & bm::set_array_mask);
++ unsigned i_to = unsigned(nb_to >> bm::set_array_shift);
++ unsigned j_to = unsigned(nb_to & bm::set_array_mask);
++
++ if (i_from >= top_size)
++ return;
++ if (i_to >= top_size)
++ {
++ i_to = unsigned(top_size-1);
++ j_to = bm::set_sub_array_size-1;
++ }
++
++ for (unsigned i = i_from; i <= i_to; ++i)
++ {
++ T** blk_blk = root[i];
++ if (!blk_blk)
++ continue;
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ {
++ unsigned j = (i == i_from) ? j_from : 0;
++ if (!j && (i != i_to)) // full sub-block
++ {
++ N base_idx = bm::get_super_block_start<N>(i);
++ f.add_range(base_idx, bm::set_sub_total_bits);
++ }
++ else
++ {
++ do
++ {
++ N base_idx = bm::get_block_start<N>(i, j);
++ f.add_range(base_idx, bm::gap_max_bits);
++ if ((i == i_to) && (j == j_to))
++ return;
++ } while (++j < bm::set_sub_array_size);
++ }
++ }
++ else
++ {
++ unsigned j = (i == i_from) ? j_from : 0;
++ do
++ {
++ const T* block;
++ if (blk_blk[j])
++ {
++ N base_idx = bm::get_block_start<N>(i, j);
++ if (0 != (block = blk_blk[j]))
++ {
++ if (BM_IS_GAP(block))
++ {
++ bm::for_each_gap_blk(BMGAP_PTR(block), base_idx, f);
++ }
++ else
++ {
++ bm::for_each_bit_blk(block, base_idx, f);
++ }
++ }
++ }
++
++ if ((i == i_to) && (j == j_to))
++ return;
++ } while (++j < bm::set_sub_array_size);
++ }
++ } // for i
++}
++
++
++/**
++ Implementation of for_each_bit_range without boilerplave checks
++ @internal
++*/
++template<class BV, class Func>
++void for_each_bit_range_no_check(const BV& bv,
++ typename BV::size_type left,
++ typename BV::size_type right,
++ Func& bit_functor)
++{
++ typedef typename BV::size_type size_type;
++ typedef typename BV::block_idx_type block_idx_type;
++
++ const typename BV::blocks_manager_type& bman = bv.get_blocks_manager();
++ bm::word_t*** blk_root = bman.top_blocks_root();
++ if (!blk_root)
++ return;
++
++ block_idx_type nblock_left = (left >> bm::set_block_shift);
++ block_idx_type nblock_right = (right >> bm::set_block_shift);
++
++ unsigned i0, j0;
++ bm::get_block_coord(nblock_left, i0, j0);
++ const bm::word_t* block = bman.get_block_ptr(i0, j0);
++ unsigned nbit_left = unsigned(left & bm::set_block_mask);
++ size_type offset = nblock_left * bm::bits_in_block;
++
++ if (nblock_left == nblock_right) // hit in the same block
++ {
++ if (!block)
++ return;
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++ if (BM_IS_GAP(block))
++ {
++ bm::for_each_gap_blk_range(BMGAP_PTR(block), offset,
++ nbit_left, nbit_right, bit_functor);
++ }
++ else
++ {
++ bm::for_each_bit_blk(block, offset, nbit_left, nbit_right,
++ bit_functor);
++ }
++ return;
++ }
++ // process left block
++ if (nbit_left && block)
++ {
++ if (BM_IS_GAP(block))
++ {
++ bm::for_each_gap_blk_range(BMGAP_PTR(block), offset,
++ nbit_left, bm::bits_in_block-1, bit_functor);
++ }
++ else
++ {
++ bm::for_each_bit_blk(block, offset, nbit_left, bm::bits_in_block-1,
++ bit_functor);
++ }
++ ++nblock_left;
++ }
++
++ // process all complete blocks in the middle
++ {
++ block_idx_type top_blocks_size = bman.top_block_size();
++ bm::for_each_bit_block_range(blk_root, top_blocks_size,
++ nblock_left, nblock_right-1, bit_functor);
++ }
++
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++ bm::get_block_coord(nblock_right, i0, j0);
++ block = bman.get_block_ptr(i0, j0);
++
++ if (block)
++ {
++ offset = nblock_right * bm::bits_in_block;
++ if (BM_IS_GAP(block))
++ {
++ bm::for_each_gap_blk_range(BMGAP_PTR(block), offset,
++ 0, nbit_right, bit_functor);
++ }
++ else
++ {
++ bm::for_each_bit_blk(block, offset, 0, nbit_right, bit_functor);
++ }
++ }
++}
++
++
++
+ } // namespace bm
+
+ #ifdef _MSC_VER
+Index: c++/include/util/bitset/bmbmatrix.h
+===================================================================
+--- a/c++/include/util/bitset/bmbmatrix.h (revision 90103)
++++ b/c++/include/util/bitset/bmbmatrix.h (revision 90104)
+@@ -72,7 +72,7 @@
+ allocation_policy_type ap = allocation_policy_type(),
+ size_type bv_max_size = bm::id_max,
+ const allocator_type& alloc = allocator_type());
+- ~basic_bmatrix() BMNOEXEPT;
++ ~basic_bmatrix() BMNOEXCEPT;
+
+ /*! copy-ctor */
+ basic_bmatrix(const basic_bmatrix<BV>& bbm);
+@@ -84,10 +84,10 @@
+
+ #ifndef BM_NO_CXX11
+ /*! move-ctor */
+- basic_bmatrix(basic_bmatrix<BV>&& bbm) BMNOEXEPT;
++ basic_bmatrix(basic_bmatrix<BV>&& bbm) BMNOEXCEPT;
+
+ /*! move assignmment operator */
+- basic_bmatrix<BV>& operator = (basic_bmatrix<BV>&& bbm) BMNOEXEPT
++ basic_bmatrix<BV>& operator = (basic_bmatrix<BV>&& bbm) BMNOEXCEPT
+ {
+ if (this != &bbm)
+ {
+@@ -98,7 +98,8 @@
+ }
+ #endif
+
+- void set_allocator_pool(allocator_pool_type* pool_ptr) { pool_ = pool_ptr; }
++ void set_allocator_pool(allocator_pool_type* pool_ptr) BMNOEXCEPT
++ { pool_ = pool_ptr; }
+
+ ///@}
+
+@@ -107,7 +108,7 @@
+ ///@{
+
+ /*! Swap content */
+- void swap(basic_bmatrix<BV>& bbm) BMNOEXEPT;
++ void swap(basic_bmatrix<BV>& bbm) BMNOEXCEPT;
+
+ /*! Copy content */
+ void copy_from(const basic_bmatrix<BV>& bbm);
+@@ -118,17 +119,17 @@
+ /*! @name row access */
+ ///@{
+
+- /*! Get row bit-vector */
+- const bvector_type* row(size_type i) const;
++ /*! Get row bit-vector. Can return NULL */
++ const bvector_type* row(size_type i) const BMNOEXCEPT;
+
+- /*! Get row bit-vector */
+- bvector_type_const_ptr get_row(size_type i) const;
++ /*! Get row bit-vector. Can return NULL */
++ bvector_type_const_ptr get_row(size_type i) const BMNOEXCEPT;
+
+- /*! Get row bit-vector */
+- bvector_type* get_row(size_type i);
++ /*! Get row bit-vector. Can return NULL */
++ bvector_type* get_row(size_type i) BMNOEXCEPT;
+
+ /*! get number of value rows */
+- size_type rows() const { return rsize_; }
++ size_type rows() const BMNOEXCEPT { return rsize_; }
+
+ /*! Make sure row is constructed, return bit-vector */
+ bvector_type_ptr construct_row(size_type row);
+@@ -168,7 +169,7 @@
+ @param pos - column position in the matrix
+ @param octet_idx - octet based row position (1 octet - 8 rows)
+ */
+- unsigned char get_octet(size_type pos, size_type octet_idx) const;
++ unsigned char get_octet(size_type pos, size_type octet_idx) const BMNOEXCEPT;
+
+ /*!
+ Compare vector[pos] with octet
+@@ -183,7 +184,7 @@
+ @return 0 - equal, -1 - less(vect[pos] < octet), 1 - greater
+ */
+ int compare_octet(size_type pos,
+- size_type octet_idx, char octet) const;
++ size_type octet_idx, char octet) const BMNOEXCEPT;
+
+ ///@}
+
+@@ -194,12 +195,13 @@
+ ///@{
+
+ /// Test if 4 rows from i are not NULL
+- bool test_4rows(unsigned i) const;
++ bool test_4rows(unsigned i) const BMNOEXCEPT;
+
+ /// Get low level internal access to
+- const bm::word_t* get_block(size_type p, unsigned i, unsigned j) const;
++ const bm::word_t* get_block(size_type p,
++ unsigned i, unsigned j) const BMNOEXCEPT;
+
+- unsigned get_half_octet(size_type pos, size_type row_idx) const;
++ unsigned get_half_octet(size_type pos, size_type row_idx) const BMNOEXCEPT;
+
+ /*!
+ \brief run memory optimization for all bit-vector rows
+@@ -222,7 +224,7 @@
+
+ protected:
+ void allocate_rows(size_type rsize);
+- void free_rows() BMNOEXEPT;
++ void free_rows() BMNOEXCEPT;
+
+ bvector_type* construct_bvector(const bvector_type* bv) const;
+ void destruct_bvector(bvector_type* bv) const;
+@@ -286,7 +288,7 @@
+
+ #ifndef BM_NO_CXX11
+ /*! move-ctor */
+- base_sparse_vector(base_sparse_vector<Val, BV, MAX_SIZE>&& bsv) BMNOEXEPT
++ base_sparse_vector(base_sparse_vector<Val, BV, MAX_SIZE>&& bsv) BMNOEXCEPT
+ {
+ bmatr_.swap(bsv.bmatr_);
+ size_ = bsv.size_;
+@@ -295,9 +297,9 @@
+ }
+ #endif
+
+- void swap(base_sparse_vector<Val, BV, MAX_SIZE>& bsv) BMNOEXEPT;
++ void swap(base_sparse_vector<Val, BV, MAX_SIZE>& bsv) BMNOEXCEPT;
+
+- size_type size() const { return size_; }
++ size_type size() const BMNOEXCEPT { return size_; }
+
+ void resize(size_type new_size);
+
+@@ -304,10 +306,10 @@
+ void clear_range(size_type left, size_type right, bool set_null);
+
+ /*! \brief resize to zero, free memory */
+- void clear() BMNOEXEPT;
++ void clear() BMNOEXCEPT;
+
+ /*! return true if empty */
+- bool empty() const { return size() == 0; }
++ bool empty() const BMNOEXCEPT { return size() == 0; }
+
+ public:
+
+@@ -317,13 +319,14 @@
+ /**
+ \brief check if container supports NULL(unassigned) values
+ */
+- bool is_nullable() const { return bmatr_.get_row(this->null_plain()) != 0; }
++ bool is_nullable() const BMNOEXCEPT
++ { return bmatr_.get_row(this->null_plain()) != 0; }
+
+ /**
+ \brief Get bit-vector of assigned values or NULL
+ (if not constructed that way)
+ */
+- const bvector_type* get_null_bvector() const
++ const bvector_type* get_null_bvector() const BMNOEXCEPT
+ { return bmatr_.get_row(this->null_plain()); }
+
+ /** \brief test if specified element is NULL
+@@ -331,7 +334,7 @@
+ \return true if it is NULL false if it was assigned or container
+ is not configured to support assignment flags
+ */
+- bool is_null(size_type idx) const;
++ bool is_null(size_type idx) const BMNOEXCEPT;
+
+
+ ///@}
+@@ -352,25 +355,27 @@
+ \return bit-vector for the bit plain or NULL
+ */
+ bvector_type_const_ptr
+- get_plain(unsigned i) const { return bmatr_.row(i); }
++ get_plain(unsigned i) const BMNOEXCEPT { return bmatr_.row(i); }
+
+ /*!
+ \brief get total number of bit-plains in the vector
+ */
+- static unsigned plains() { return value_bits(); }
++ static unsigned plains() BMNOEXCEPT { return value_bits(); }
+
+ /** Number of stored bit-plains (value plains + extra */
+- static unsigned stored_plains() { return value_bits()+1; }
++ static unsigned stored_plains() BMNOEXCEPT { return value_bits()+1; }
+
+
+ /** Number of effective bit-plains in the value type */
+- unsigned effective_plains() const { return effective_plains_ + 1; }
++ unsigned effective_plains() const BMNOEXCEPT
++ { return effective_plains_ + 1; }
+
+ /*!
+ \brief get access to bit-plain as is (can return NULL)
+ */
+- bvector_type_ptr plain(unsigned i) { return bmatr_.get_row(i); }
+- const bvector_type_ptr plain(unsigned i) const { return bmatr_.get_row(i); }
++ bvector_type_ptr plain(unsigned i) BMNOEXCEPT { return bmatr_.get_row(i); }
++ bvector_type_const_ptr plain(unsigned i) const BMNOEXCEPT
++ { return bmatr_.get_row(i); }
+
+ bvector_type* get_null_bvect() { return bmatr_.get_row(this->null_plain());}
+
+@@ -388,12 +393,12 @@
+ @return 64-bit mask
+ @internal
+ */
+- bm::id64_t get_plains_mask(unsigned element_idx) const;
++ bm::id64_t get_plains_mask(unsigned element_idx) const BMNOEXCEPT;
+
+ /*!
+ get read-only access to inetrnal bit-matrix
+ */
+- const bmatrix_type& get_bmatrix() const { return bmatr_; }
++ const bmatrix_type& get_bmatrix() const BMNOEXCEPT { return bmatr_; }
+ ///@}
+
+ /*!
+@@ -417,7 +422,7 @@
+
+ @sa statistics
+ */
+- void calc_stat(typename bvector_type::statistics* st) const;
++ void calc_stat(typename bvector_type::statistics* st) const BMNOEXCEPT;
+
+ /*!
+ \brief check if another sparse vector has the same content and size
+@@ -429,7 +434,7 @@
+ \return true, if it is the same
+ */
+ bool equal(const base_sparse_vector<Val, BV, MAX_SIZE>& sv,
+- bm::null_support null_able = bm::use_null) const;
++ bm::null_support null_able = bm::use_null) const BMNOEXCEPT;
+
+ protected:
+ void copy_from(const base_sparse_vector<Val, BV, MAX_SIZE>& bsv);
+@@ -463,13 +468,13 @@
+ typedef typename bvector_type::block_idx_type block_idx_type;
+
+ /** Number of total bit-plains in the value type*/
+- static unsigned value_bits()
++ static unsigned value_bits() BMNOEXCEPT
+ {
+ return base_sparse_vector<Val, BV, MAX_SIZE>::sv_value_plains;
+ }
+
+ /** plain index for the "NOT NULL" flags plain */
+- static unsigned null_plain() { return value_bits(); }
++ static unsigned null_plain() BMNOEXCEPT { return value_bits(); }
+
+ /** optimize block in all matrix plains */
+ void optimize_block(block_idx_type nb)
+@@ -515,7 +520,7 @@
+ //---------------------------------------------------------------------
+
+ template<typename BV>
+-basic_bmatrix<BV>::~basic_bmatrix() BMNOEXEPT
++basic_bmatrix<BV>::~basic_bmatrix() BMNOEXCEPT
+ {
+ free_rows();
+ }
+@@ -537,7 +542,7 @@
+ //---------------------------------------------------------------------
+
+ template<typename BV>
+-basic_bmatrix<BV>::basic_bmatrix(basic_bmatrix<BV>&& bbm) BMNOEXEPT
++basic_bmatrix<BV>::basic_bmatrix(basic_bmatrix<BV>&& bbm) BMNOEXCEPT
+ : bv_size_(bbm.bv_size_),
+ alloc_(bbm.alloc_),
+ ap_(bbm.ap_),
+@@ -552,7 +557,7 @@
+
+ template<typename BV>
+ const typename basic_bmatrix<BV>::bvector_type*
+-basic_bmatrix<BV>::row(size_type i) const
++basic_bmatrix<BV>::row(size_type i) const BMNOEXCEPT
+ {
+ BM_ASSERT(i < rsize_);
+ return bv_rows_[i];
+@@ -562,7 +567,7 @@
+
+ template<typename BV>
+ const typename basic_bmatrix<BV>::bvector_type*
+-basic_bmatrix<BV>::get_row(size_type i) const
++basic_bmatrix<BV>::get_row(size_type i) const BMNOEXCEPT
+ {
+ BM_ASSERT(i < rsize_);
+ return bv_rows_[i];
+@@ -572,7 +577,7 @@
+
+ template<typename BV>
+ typename basic_bmatrix<BV>::bvector_type*
+-basic_bmatrix<BV>::get_row(size_type i)
++basic_bmatrix<BV>::get_row(size_type i) BMNOEXCEPT
+ {
+ BM_ASSERT(i < rsize_);
+ return bv_rows_[i];
+@@ -581,7 +586,7 @@
+ //---------------------------------------------------------------------
+
+ template<typename BV>
+-bool basic_bmatrix<BV>::test_4rows(unsigned j) const
++bool basic_bmatrix<BV>::test_4rows(unsigned j) const BMNOEXCEPT
+ {
+ BM_ASSERT((j + 4) <= rsize_);
+ #if defined(BM64_SSE4)
+@@ -593,7 +598,8 @@
+ __m256i w0 = _mm256_loadu_si256((__m256i*)(bv_rows_ + j));
+ return !_mm256_testz_si256(w0, w0);
+ #else
+- bool b = bv_rows_[j + 0] || bv_rows_[j + 1] || bv_rows_[j + 2] || bv_rows_[j + 3];
++ bool b = bv_rows_[j + 0] || bv_rows_[j + 1] ||
++ bv_rows_[j + 2] || bv_rows_[j + 3];
+ return b;
+ #endif
+ }
+@@ -655,7 +661,7 @@
+ //---------------------------------------------------------------------
+
+ template<typename BV>
+-void basic_bmatrix<BV>::free_rows() BMNOEXEPT
++void basic_bmatrix<BV>::free_rows() BMNOEXCEPT
+ {
+ for (size_type i = 0; i < rsize_; ++i)
+ {
+@@ -676,7 +682,7 @@
+ //---------------------------------------------------------------------
+
+ template<typename BV>
+-void basic_bmatrix<BV>::swap(basic_bmatrix<BV>& bbm) BMNOEXEPT
++void basic_bmatrix<BV>::swap(basic_bmatrix<BV>& bbm) BMNOEXCEPT
+ {
+ if (this == &bbm)
+ return;
+@@ -795,12 +801,14 @@
+
+ template<typename BV>
+ const bm::word_t*
+-basic_bmatrix<BV>::get_block(size_type p, unsigned i, unsigned j) const
++basic_bmatrix<BV>::get_block(size_type p,
++ unsigned i, unsigned j) const BMNOEXCEPT
+ {
+ bvector_type_const_ptr bv = this->row(p);
+ if (bv)
+ {
+- const typename bvector_type::blocks_manager_type& bman = bv->get_blocks_manager();
++ const typename bvector_type::blocks_manager_type& bman =
++ bv->get_blocks_manager();
+ return bman.get_block_ptr(i, j);
+ }
+ return 0;
+@@ -902,7 +910,7 @@
+
+ template<typename BV>
+ unsigned char
+-basic_bmatrix<BV>::get_octet(size_type pos, size_type octet_idx) const
++basic_bmatrix<BV>::get_octet(size_type pos, size_type octet_idx) const BMNOEXCEPT
+ {
+ unsigned v = 0;
+
+@@ -1003,7 +1011,7 @@
+ template<typename BV>
+ int basic_bmatrix<BV>::compare_octet(size_type pos,
+ size_type octet_idx,
+- char octet) const
++ char octet) const BMNOEXCEPT
+ {
+ char value = char(get_octet(pos, octet_idx));
+ return (value > octet) - (value < octet);
+@@ -1013,7 +1021,7 @@
+
+ template<typename BV>
+ unsigned
+-basic_bmatrix<BV>::get_half_octet(size_type pos, size_type row_idx) const
++basic_bmatrix<BV>::get_half_octet(size_type pos, size_type row_idx) const BMNOEXCEPT
+ {
+ unsigned v = 0;
+
+@@ -1198,7 +1206,7 @@
+
+ template<class Val, class BV, unsigned MAX_SIZE>
+ void base_sparse_vector<Val, BV, MAX_SIZE>::swap(
+- base_sparse_vector<Val, BV, MAX_SIZE>& bsv) BMNOEXEPT
++ base_sparse_vector<Val, BV, MAX_SIZE>& bsv) BMNOEXCEPT
+ {
+ if (this != &bsv)
+ {
+@@ -1212,7 +1220,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV, unsigned MAX_SIZE>
+-void base_sparse_vector<Val, BV, MAX_SIZE>::clear() BMNOEXEPT
++void base_sparse_vector<Val, BV, MAX_SIZE>::clear() BMNOEXCEPT
+ {
+ unsigned plains = value_bits();
+ for (size_type i = 0; i < plains; ++i)
+@@ -1274,7 +1282,8 @@
+ //---------------------------------------------------------------------
+
+ template<class Val, class BV, unsigned MAX_SIZE>
+-bool base_sparse_vector<Val, BV, MAX_SIZE>::is_null(size_type idx) const
++bool base_sparse_vector<Val, BV, MAX_SIZE>::is_null(
++ size_type idx) const BMNOEXCEPT
+ {
+ const bvector_type* bv_null = get_null_bvector();
+ return (bv_null) ? (!bv_null->test(idx)) : false;
+@@ -1312,7 +1321,7 @@
+
+ template<class Val, class BV, unsigned MAX_SIZE>
+ bm::id64_t base_sparse_vector<Val, BV, MAX_SIZE>::get_plains_mask(
+- unsigned element_idx) const
++ unsigned element_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(element_idx < MAX_SIZE);
+ bm::id64_t mask = 0;
+@@ -1364,7 +1373,7 @@
+
+ template<class Val, class BV, unsigned MAX_SIZE>
+ void base_sparse_vector<Val, BV, MAX_SIZE>::calc_stat(
+- typename bvector_type::statistics* st) const
++ typename bvector_type::statistics* st) const BMNOEXCEPT
+ {
+ BM_ASSERT(st);
+
+@@ -1409,7 +1418,7 @@
+
+ template<class Val, class BV, unsigned MAX_SIZE>
+ void base_sparse_vector<Val, BV, MAX_SIZE>::insert_clear_value_plains_from(
+- unsigned plain_idx, size_type idx)
++ unsigned plain_idx, size_type idx)
+ {
+ for (unsigned i = plain_idx; i < sv_value_plains; ++i)
+ {
+@@ -1437,7 +1446,7 @@
+ template<class Val, class BV, unsigned MAX_SIZE>
+ bool base_sparse_vector<Val, BV, MAX_SIZE>::equal(
+ const base_sparse_vector<Val, BV, MAX_SIZE>& sv,
+- bm::null_support null_able) const
++ bm::null_support null_able) const BMNOEXCEPT
+ {
+ size_type arg_size = sv.size();
+ if (this->size_ != arg_size)
+@@ -1468,11 +1477,6 @@
+ bool eq = bv->equal(*arg_bv);
+ if (!eq)
+ return false;
+- /*
+- int cmp = bv->compare(*arg_bv);
+- if (cmp != 0)
+- return false;
+- */
+ } // for j
+
+ if (null_able == bm::use_null)
+@@ -1490,11 +1494,6 @@
+ bool eq = bv_null->equal(*bv_null_arg);
+ if (!eq)
+ return false;
+- /*
+- int cmp = bv_null->compare(*bv_null);
+- if (cmp != 0)
+- return false;
+- */
+ }
+ return true;
+ }
+Index: c++/include/util/bitset/bmintervals.h
+===================================================================
+--- a/c++/include/util/bitset/bmintervals.h (nonexistent)
++++ b/c++/include/util/bitset/bmintervals.h (revision 90104)
+@@ -0,0 +1,781 @@
++#ifndef BMINTERVALS__H__INCLUDED__
++#define BMINTERVALS__H__INCLUDED__
++
++/*
++Copyright(c) 2002-2020 Anatoliy Kuznetsov(anatoliy_kuznetsov at yahoo.com)
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
++
++For more information please visit: http://bitmagic.io
++*/
++/*! \file bmintervals.h
++ \brief Algorithms for bit ranges and intervals
++*/
++
++#ifndef BM__H__INCLUDED__
++// BitMagic utility headers do not include main "bm.h" declaration
++// #include "bm.h" or "bm64.h" explicitly
++# error missing include (bm.h or bm64.h)
++#endif
++
++#include "bmdef.h"
++
++/** \defgroup bvintervals Algorithms for bit intervals
++ Algorithms and iterators for bit ranges and intervals
++ @ingroup bvector
++ */
++
++
++namespace bm
++{
++
++/*!
++ \brief forward iterator class to traverse bit-vector as ranges
++
++ Traverse enumerator for forward walking bit-vector as intervals:
++ series of consequtive 1111s flanked with zeroes.
++ Enumerator can traverse the whole bit-vector or jump(go_to) to position.
++
++ \ingroup bvintervals
++*/
++template<typename BV>
++class interval_enumerator
++{
++public:
++#ifndef BM_NO_STL
++ typedef std::input_iterator_tag iterator_category;
++#endif
++ typedef BV bvector_type;
++ typedef typename bvector_type::size_type size_type;
++ typedef typename bvector_type::allocator_type allocator_type;
++ typedef bm::byte_buffer<allocator_type> buffer_type;
++ typedef bm::pair<size_type, size_type> pair_type;
++
++public:
++ /*! @name Construction and assignment */
++ //@{
++
++ interval_enumerator()
++ : bv_(0), interval_(bm::id_max, bm::id_max), gap_ptr_(0)
++ {}
++
++ /**
++ Construct enumerator for the bit-vector
++ */
++ interval_enumerator(const BV& bv)
++ : bv_(&bv), interval_(bm::id_max, bm::id_max), gap_ptr_(0)
++ {
++ go_to_impl(0, false);
++ }
++
++ /**
++ Construct enumerator for the specified position
++ @param bv - source bit-vector
++ @param start_pos - position on bit-vector to search for interval
++ @param extend_start - flag to extend interval start to the start if
++ true start happens to be less than start_pos
++ @sa go_to
++ */
++ interval_enumerator(const BV& bv, size_type start_pos, bool extend_start)
++ : bv_(&bv), interval_(bm::id_max, bm::id_max), gap_ptr_(0)
++ {
++ go_to_impl(start_pos, extend_start);
++ }
++
++ /**
++ Copy constructor
++ */
++ interval_enumerator(const interval_enumerator<BV>& ien)
++ : bv_(ien.bv_), interval_(bm::id_max, bm::id_max), gap_ptr_(0)
++ {
++ go_to_impl(ien.start(), false);
++ }
++
++ /**
++ Assignment operator
++ */
++ interval_enumerator& operator=(const interval_enumerator<BV>& ien)
++ {
++ bv_ = ien.bv_; gap_ptr_ = 0;
++ go_to_impl(ien.start(), false); return *this;
++ }
++
++#ifndef BM_NO_CXX11
++ /** move-ctor */
++ interval_enumerator(interval_enumerator<BV>&& ien) BMNOEXCEPT
++ : bv_(0), interval_(bm::id_max, bm::id_max), gap_ptr_(0)
++ {
++ this->swap(ien);
++ }
++
++ /** move assignment operator */
++ interval_enumerator<BV>& operator=(interval_enumerator<BV>&& ien) BMNOEXCEPT
++ {
++ if (this != &ien)
++ this->swap(ien);
++ return *this;
++ }
++#endif
++
++ //@}
++
++
++ // -----------------------------------------------------------------
++
++ /*! @name Comparison methods all use start position to compare */
++ //@{
++
++ bool operator==(const interval_enumerator<BV>& ien) const BMNOEXCEPT
++ { return (start() == ien.start()); }
++ bool operator!=(const interval_enumerator<BV>& ien) const BMNOEXCEPT
++ { return (start() != ien.start()); }
++ bool operator < (const interval_enumerator<BV>& ien) const BMNOEXCEPT
++ { return (start() < ien.start()); }
++ bool operator <= (const interval_enumerator<BV>& ien) const BMNOEXCEPT
++ { return (start() <= ien.start()); }
++ bool operator > (const interval_enumerator<BV>& ien) const BMNOEXCEPT
++ { return (start() > ien.start()); }
++ bool operator >= (const interval_enumerator<BV>& ien) const BMNOEXCEPT
++ { return (start() >= ien.start()); }
++ //@}
++
++
++ /// Return interval start/left as bit-vector coordinate 011110 [left..right]
++ size_type start() const BMNOEXCEPT;
++ /// Return interval end/right as bit-vector coordinate 011110 [left..right]
++ size_type end() const BMNOEXCEPT;
++
++ const pair_type& operator*() const BMNOEXCEPT { return interval_; }
++
++ /// Get interval pair
++ const pair_type& get() const BMNOEXCEPT { return interval_; }
++
++ /// Returns true if enumerator is valid (false if traversal is done)
++ bool valid() const BMNOEXCEPT;
++
++ // -----------------------------------------------------------------
++
++ /*! @name enumerator positioning */
++ //@{
++
++ /*!
++ @brief Go to interval at specified position
++ Jump to position with interval. If interval is not available at
++ the specified position (0 bit) enumerator will find the next interval.
++ If interval is present we have an option to find interval start [left..]
++ and set enumerator from the effective start coordinate
++
++ @param pos - position on bit-vector
++ @param extend_start - find effective start if it is less than the
++ go to position
++ @return true if enumerator remains valid after the jump
++ */
++ bool go_to(size_type pos, bool extend_start = true);
++
++ /*! Advance to the next interval
++ @return true if interval is available
++ @sa valid
++ */
++ bool advance();
++
++ /*! \brief Advance enumerator forward to the next available bit */
++ interval_enumerator<BV>& operator++() BMNOEXCEPT
++ { advance(); return *this; }
++
++ /*! \brief Advance enumerator forward to the next available bit */
++ interval_enumerator<BV> operator++(int) BMNOEXCEPT
++ {
++ interval_enumerator<BV> tmp = *this;
++ advance();
++ return tmp;
++ }
++ //@}
++
++ /**
++ swap enumerator with another one
++ */
++ void swap(interval_enumerator<BV>& ien) BMNOEXCEPT;
++
++protected:
++ typedef typename bvector_type::block_idx_type block_idx_type;
++ typedef typename bvector_type::allocator_type bv_allocator_type;
++ typedef bm::heap_vector<unsigned short, bv_allocator_type, true>
++ gap_vector_type;
++
++
++ bool go_to_impl(size_type pos, bool extend_start);
++
++ /// Turn FSM into invalid state (out of range)
++ void invalidate() BMNOEXCEPT;
++
++private:
++ const BV* bv_; ///!< bit-vector for traversal
++ gap_vector_type gap_buf_; ///!< GAP buf.vector for bit-block
++ pair_type interval_; ///!< current interval
++ const bm::gap_word_t* gap_ptr_; ///!< current pointer in GAP block
++};
++
++//----------------------------------------------------------------------------
++
++/*!
++ \brief Returns true if range is all 1s flanked with 0s
++ Function performs the test on a closed range [left, right]
++ true interval is all 1s AND test(left-1)==false AND test(right+1)==false
++ Examples:
++ 01110 [1,3] - true
++ 11110 [0,3] - true
++ 11110 [1,3] - false
++ \param bv - bit-vector for check
++ \param left - index of first bit start checking
++ \param right - index of last bit
++ \return true/false
++
++ \ingroup bvintervals
++
++ @sa is_all_one_range
++*/
++template<class BV>
++bool is_interval(const BV& bv,
++ typename BV::size_type left,
++ typename BV::size_type right) BMNOEXCEPT
++{
++ typedef typename BV::block_idx_type block_idx_type;
++
++ const typename BV::blocks_manager_type& bman = bv.get_blocks_manager();
++
++ if (!bman.is_init())
++ return false; // nothing to do
++
++ if (right < left)
++ bm::xor_swap(left, right);
++ if (left == bm::id_max) // out of range
++ return false;
++ if (right == bm::id_max)
++ --right;
++
++ block_idx_type nblock_left = (left >> bm::set_block_shift);
++ block_idx_type nblock_right = (right >> bm::set_block_shift);
++
++ if (nblock_left == nblock_right) // same block (fast case)
++ {
++ unsigned nbit_left = unsigned(left & bm::set_block_mask);
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++ if ((nbit_left > 0) && (nbit_right < bm::gap_max_bits-1))
++ {
++ unsigned i0, j0;
++ bm::get_block_coord(nblock_left, i0, j0);
++ const bm::word_t* block = bman.get_block_ptr(i0, j0);
++ bool b = bm::block_is_interval(block, nbit_left, nbit_right);
++ return b;
++ }
++ }
++ bool is_left, is_right, is_all_one;
++ is_left = left > 0 ? bv.test(left-1) : false;
++ if (is_left == false)
++ {
++ is_right = (right < (bm::id_max - 1)) ? bv.test(right + 1) : false;
++ if (is_left == false && is_right == false)
++ {
++ is_all_one = bv.is_all_one_range(left, right);
++ return is_all_one;
++ }
++ }
++ return false;
++}
++
++
++//----------------------------------------------------------------------------
++
++/*!
++
++ \brief Reverse find index of first 1 bit gap (01110) starting from position
++ Reverse scan for the first 1 in a block of continuous 1s.
++ Method employs closed interval semantics: 0[pos..from]
++
++ \param bv - bit-vector for search
++ \param from - position to start reverse search from
++ \param pos - [out] index of the found first 1 bit in a gap of bits
++ \return true if search returned result, false if not found
++ (start point is zero)
++
++ \sa is_interval, find_interval_end
++ \ingroup bvintervals
++*/
++template<class BV>
++bool find_interval_start(const BV& bv,
++ typename BV::size_type from,
++ typename BV::size_type& pos) BMNOEXCEPT
++{
++ typedef typename BV::size_type size_type;
++ typedef typename BV::block_idx_type block_idx_type;
++
++ const typename BV::blocks_manager_type& bman = bv.get_blocks_manager();
++
++ if (!bman.is_init())
++ return false; // nothing to do
++ if (!from)
++ {
++ pos = from;
++ return bv.test(from);
++ }
++
++ block_idx_type nb = (from >> bm::set_block_shift);
++ unsigned i0, j0;
++ bm::get_block_coord(nb, i0, j0);
++
++ size_type base_idx;
++ unsigned found_nbit;
++
++ const bm::word_t* block = bman.get_block_ptr(i0, j0);
++ if (!block)
++ return false;
++ unsigned nbit = unsigned(from & bm::set_block_mask);
++ unsigned res = bm::block_find_interval_start(block, nbit, &found_nbit);
++
++ switch (res)
++ {
++ case 0: // not interval
++ return false;
++ case 1: // interval found
++ pos = found_nbit + (nb * bm::gap_max_bits);
++ return true;
++ case 2: // keep scanning
++ base_idx = bm::get_block_start<size_type>(i0, j0);
++ pos = base_idx + found_nbit;
++ if (!nb)
++ return true;
++ break;
++ default:
++ BM_ASSERT(0);
++ } // switch
++
++ --nb;
++ bm::get_block_coord(nb, i0, j0);
++ bm::word_t*** blk_root = bman.top_blocks_root();
++
++ for (unsigned i = i0; true; --i)
++ {
++ bm::word_t** blk_blk = blk_root[i];
++ if (!blk_blk)
++ return true;
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ {
++ pos = bm::get_super_block_start<size_type>(i);
++ if (!i)
++ break;
++ continue;
++ }
++ unsigned j = (i == i0) ? j0 : 255;
++ for (; true; --j)
++ {
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ {
++ pos = bm::get_block_start<size_type>(i, j);
++ goto loop_j_end; // continue
++ }
++
++ block = blk_blk[j];
++ if (!block)
++ return true;
++
++ res = bm::block_find_interval_start(block,
++ bm::gap_max_bits-1, &found_nbit);
++ switch (res)
++ {
++ case 0: // not an interval (but it was one, so keep the last result)
++ return true;
++ case 1: // interval found
++ base_idx = bm::get_block_start<size_type>(i, j);
++ pos = base_idx + found_nbit;
++ return true;
++ case 2: // keep scanning
++ pos = bm::get_block_start<size_type>(i, j);
++ break;
++ default:
++ BM_ASSERT(0);
++ } // switch
++
++ loop_j_end: // continue point
++ if (!j)
++ break;
++ } // for j
++
++ if (!i)
++ break;
++ } // for i
++
++ return true;
++}
++
++
++//----------------------------------------------------------------------------
++
++/*!
++ \brief Reverse find index of first 1 bit gap (01110) starting from position
++ Reverse scan for the first 1 in a block of continuous 1s.
++ Method employs closed interval semantics: 0[pos..from]
++
++ \param bv - bit-vector for search
++ \param from - position to start reverse search from
++ \param pos - [out] index of the found first 1 bit in a gap of bits
++ \return true if search returned result, false if not found
++ (start point is zero)
++
++ \sa is_interval, find_interval_end
++ \ingroup bvintervals
++*/
++template <typename BV>
++bool find_interval_end(const BV& bv,
++ typename BV::size_type from,
++ typename BV::size_type & pos) BMNOEXCEPT
++{
++ typedef typename BV::block_idx_type block_idx_type;
++
++ if (from == bm::id_max)
++ return false;
++ const typename BV::blocks_manager_type& bman = bv.get_blocks_manager();
++
++ if (!bman.is_init())
++ return false; // nothing to do
++ if (from == bm::id_max-1)
++ {
++ pos = from;
++ return bv.test(from);
++ }
++
++ block_idx_type nb = (from >> bm::set_block_shift);
++ unsigned i0, j0;
++ bm::get_block_coord(nb, i0, j0);
++
++ unsigned found_nbit;
++
++ const bm::word_t* block = bman.get_block_ptr(i0, j0);
++ if (!block)
++ return false;
++ unsigned nbit = unsigned(from & bm::set_block_mask);
++ unsigned res = bm::block_find_interval_end(block, nbit, &found_nbit);
++ switch (res)
++ {
++ case 0: // not interval
++ return false;
++ case 1: // interval found
++ pos = found_nbit + (nb * bm::gap_max_bits);
++ return true;
++ case 2: // keep scanning
++ pos = found_nbit + (nb * bm::gap_max_bits);
++ break;
++ default:
++ BM_ASSERT(0);
++ } // switch
++
++ block_idx_type nblock_right = (bm::id_max >> bm::set_block_shift);
++ unsigned i_from, j_from, i_to, j_to;
++ bm::get_block_coord(nblock_right, i_to, j_to);
++ block_idx_type top_size = bman.top_block_size();
++ if (i_to >= top_size)
++ i_to = unsigned(top_size-1);
++
++ ++nb;
++ bm::word_t*** blk_root = bman.top_blocks_root();
++ bm::get_block_coord(nb, i_from, j_from);
++
++ for (unsigned i = i_from; i <= i_to; ++i)
++ {
++ bm::word_t** blk_blk = blk_root[i];
++ if (!blk_blk)
++ return true;
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ {
++ if (i > i_from)
++ {
++ pos += bm::gap_max_bits * bm::set_sub_array_size;
++ continue;
++ }
++ else
++ {
++ // TODO: optimization to avoid scanning rest of the super block
++ }
++ }
++
++ unsigned j = (i == i_from) ? j_from : 0;
++ do
++ {
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ {
++ pos += bm::gap_max_bits;
++ continue;
++ }
++
++ block = blk_blk[j];
++ if (!block)
++ return true;
++
++ res = bm::block_find_interval_end(block, 0, &found_nbit);
++ switch (res)
++ {
++ case 0: // not an interval (but it was one, so keep the last result)
++ return true;
++ case 1: // interval found
++ pos += found_nbit+1;
++ return true;
++ case 2: // keep scanning
++ pos += bm::gap_max_bits;
++ break;
++ default:
++ BM_ASSERT(0);
++ } // switch
++ } while (++j < bm::set_sub_array_size);
++ } // for i
++
++ return true;
++}
++
++
++
++//----------------------------------------------------------------------------
++//
++//----------------------------------------------------------------------------
++
++template<typename BV>
++typename interval_enumerator<BV>::size_type
++interval_enumerator<BV>::start() const BMNOEXCEPT
++{
++ return interval_.first;
++}
++
++//----------------------------------------------------------------------------
++
++template<typename BV>
++typename interval_enumerator<BV>::size_type
++interval_enumerator<BV>::end() const BMNOEXCEPT
++{
++ return interval_.second;
++}
++
++//----------------------------------------------------------------------------
++
++template<typename BV>
++bool interval_enumerator<BV>::valid() const BMNOEXCEPT
++{
++ return (interval_.first != bm::id_max);
++}
++
++//----------------------------------------------------------------------------
++
++template<typename BV>
++void interval_enumerator<BV>::invalidate() BMNOEXCEPT
++{
++ interval_.first = interval_.second = bm::id_max;
++}
++
++//----------------------------------------------------------------------------
++
++template<typename BV>
++bool interval_enumerator<BV>::go_to(size_type pos, bool extend_start)
++{
++ return go_to_impl(pos, extend_start);
++}
++
++//----------------------------------------------------------------------------
++
++template<typename BV>
++bool interval_enumerator<BV>::go_to_impl(size_type pos, bool extend_start)
++{
++ if (!bv_ || !bv_->is_init() || (pos >= bm::id_max))
++ {
++ invalidate();
++ return false;
++ }
++
++ bool found;
++ size_type start_pos;
++
++ // go to prolog: identify the true interval start position
++ //
++ if (extend_start)
++ {
++ found = bm::find_interval_start(*bv_, pos, start_pos);
++ if (!found)
++ {
++ found = bv_->find(pos, start_pos);
++ if (!found)
++ {
++ invalidate();
++ return false;
++ }
++ }
++ }
++ else
++ {
++ found = bv_->find(pos, start_pos);
++ if (!found)
++ {
++ invalidate();
++ return false;
++ }
++ }
++
++ // start position established, start decoding from it
++ interval_.first = pos = start_pos;
++
++ block_idx_type nb = (pos >> bm::set_block_shift);
++ const typename BV::blocks_manager_type& bman = bv_->get_blocks_manager();
++ unsigned i0, j0;
++ bm::get_block_coord(nb, i0, j0);
++ const bm::word_t* block = bman.get_block_ptr(i0, j0);
++ BM_ASSERT(block);
++
++ if (block == FULL_BLOCK_FAKE_ADDR)
++ {
++ // super-long interval, find the end of it
++ found = bm::find_interval_end(*bv_, pos, interval_.second);
++ BM_ASSERT(found);
++ gap_ptr_ = 0;
++ return true;
++ }
++
++ if (BM_IS_GAP(block))
++ {
++ const bm::gap_word_t* BMRESTRICT gap_block = BMGAP_PTR(block);
++ unsigned nbit = unsigned(pos & bm::set_block_mask);
++
++ unsigned is_set;
++ unsigned gap_pos = bm::gap_bfind(gap_block, nbit, &is_set);
++ BM_ASSERT(is_set);
++
++ interval_.second = (nb * bm::gap_max_bits) + gap_block[gap_pos];
++ if (gap_block[gap_pos] == bm::gap_max_bits-1)
++ {
++ // it is the end of the GAP block - run search
++ //
++ if (interval_.second == bm::id_max-1)
++ {
++ gap_ptr_ = 0;
++ return true;
++ }
++ found = bm::find_interval_end(*bv_, interval_.second + 1, start_pos);
++ if (found)
++ interval_.second = start_pos;
++ gap_ptr_ = 0;
++ return true;
++ }
++ gap_ptr_ = gap_block + gap_pos;
++ return true;
++ }
++
++ // bit-block: turn to GAP and position there
++ //
++ if (gap_buf_.size() == 0)
++ {
++ gap_buf_.resize(bm::gap_max_bits+64);
++ }
++ bm::gap_word_t* gap_tmp = gap_buf_.data();
++ unsigned len = bm::bit_to_gap(gap_tmp, block, bm::gap_max_bits+64);
++ BM_ASSERT(len);
++
++
++ size_type base_idx = (nb * bm::gap_max_bits);
++ for (unsigned i = 1; i <= len; ++i)
++ {
++ size_type gap_pos = base_idx + gap_tmp[i];
++ if (gap_pos >= pos)
++ {
++ if (gap_tmp[i] == bm::gap_max_bits - 1)
++ {
++ found = bm::find_interval_end(*bv_, gap_pos, interval_.second);
++ BM_ASSERT(found);
++ gap_ptr_ = 0;
++ return true;
++ }
++
++ gap_ptr_ = &gap_tmp[i];
++ interval_.second = gap_pos;
++ return true;
++ }
++ if (gap_tmp[i] == bm::gap_max_bits - 1)
++ break;
++ } // for
++
++ BM_ASSERT(0);
++
++ return false;
++}
++
++//----------------------------------------------------------------------------
++
++template<typename BV>
++bool interval_enumerator<BV>::advance()
++{
++ BM_ASSERT(valid());
++
++ if (interval_.second == bm::id_max-1)
++ {
++ invalidate();
++ return false;
++ }
++ block_idx_type nb = (interval_.first >> bm::set_block_shift);
++
++ bool found;
++ if (gap_ptr_) // in GAP block
++ {
++ ++gap_ptr_; // 0 - GAP
++ if (*gap_ptr_ == bm::gap_max_bits-1) // GAP block end
++ {
++ return go_to_impl(((nb+1) * bm::gap_max_bits), false);
++ }
++ unsigned prev = *gap_ptr_;
++
++ ++gap_ptr_; // 1 - GAP
++ BM_ASSERT(*gap_ptr_ > prev);
++ interval_.first = (nb * bm::gap_max_bits) + prev + 1;
++ if (*gap_ptr_ == bm::gap_max_bits-1) // GAP block end
++ {
++ found = bm::find_interval_end(*bv_, interval_.first, interval_.second);
++ BM_ASSERT(found); (void)found;
++ gap_ptr_ = 0;
++ return true;
++ }
++ interval_.second = (nb * bm::gap_max_bits) + *gap_ptr_;
++ return true;
++ }
++ return go_to_impl(interval_.second + 1, false);
++}
++
++//----------------------------------------------------------------------------
++
++template<typename BV>
++void interval_enumerator<BV>::swap(interval_enumerator<BV>& ien) BMNOEXCEPT
++{
++ const BV* bv_tmp = bv_;
++ bv_ = ien.bv_;
++ ien.bv_ = bv_tmp;
++
++ gap_buf_.swap(ien.gap_buf_);
++ bm::xor_swap(interval_.first, ien.interval_.first);
++ bm::xor_swap(interval_.second, ien.interval_.second);
++
++ const bm::gap_word_t* gap_tmp = gap_ptr_;
++ gap_ptr_ = ien.gap_ptr_;
++ ien.gap_ptr_ = gap_tmp;
++}
++
++//----------------------------------------------------------------------------
++//
++//----------------------------------------------------------------------------
++
++
++} // namespace bm
++
++#include "bmundef.h"
++
++#endif
+
+Property changes on: c++/include/util/bitset/bmintervals.h
+___________________________________________________________________
+Added: svn:eol-style
+## -0,0 +1 ##
++native
+\ No newline at end of property
+Added: svn:keywords
+## -0,0 +1 ##
++Id
+\ No newline at end of property
+Added: svn:mime-type
+## -0,0 +1 ##
++text/x-c
+\ No newline at end of property
+Index: c++/include/util/bitset/encoding.h
+===================================================================
+--- a/c++/include/util/bitset/encoding.h (revision 90103)
++++ b/c++/include/util/bitset/encoding.h (revision 90104)
+@@ -51,24 +51,24 @@
+ public:
+ typedef unsigned char* position_type;
+ public:
+- encoder(unsigned char* buf, size_t size);
+- void put_8(unsigned char c);
+- void put_16(bm::short_t s);
+- void put_16(const bm::short_t* s, unsigned count);
+- void put_24(bm::word_t w);
+- void put_32(bm::word_t w);
+- void put_32(const bm::word_t* w, unsigned count);
+- void put_48(bm::id64_t w);
+- void put_64(bm::id64_t w);
++ encoder(unsigned char* buf, size_t size) BMNOEXCEPT;
++ void put_8(unsigned char c) BMNOEXCEPT;
++ void put_16(bm::short_t s) BMNOEXCEPT;
++ void put_16(const bm::short_t* s, unsigned count) BMNOEXCEPT;
++ void put_24(bm::word_t w) BMNOEXCEPT;
++ void put_32(bm::word_t w) BMNOEXCEPT;
++ void put_32(const bm::word_t* w, unsigned count) BMNOEXCEPT;
++ void put_48(bm::id64_t w) BMNOEXCEPT;
++ void put_64(bm::id64_t w) BMNOEXCEPT;
+ void put_prefixed_array_32(unsigned char c,
+- const bm::word_t* w, unsigned count);
++ const bm::word_t* w, unsigned count) BMNOEXCEPT;
+ void put_prefixed_array_16(unsigned char c,
+ const bm::short_t* s, unsigned count,
+- bool encode_count);
+- void memcpy(const unsigned char* src, size_t count);
+- size_t size() const;
+- unsigned char* get_pos() const;
+- void set_pos(unsigned char* buf_pos);
++ bool encode_count) BMNOEXCEPT;
++ void memcpy(const unsigned char* src, size_t count) BMNOEXCEPT;
++ size_t size() const BMNOEXCEPT;
++ unsigned char* get_pos() const BMNOEXCEPT;
++ void set_pos(unsigned char* buf_pos) BMNOEXCEPT;
+ private:
+ unsigned char* buf_;
+ unsigned char* start_;
+@@ -83,25 +83,25 @@
+ class decoder_base
+ {
+ public:
+- decoder_base(const unsigned char* buf) { buf_ = start_ = buf; }
++ decoder_base(const unsigned char* buf) BMNOEXCEPT { buf_ = start_ = buf; }
+
+ /// Reads character from the decoding buffer.
+- unsigned char get_8() { return *buf_++; }
++ unsigned char get_8() BMNOEXCEPT { return *buf_++; }
+
+ /// Returns size of the current decoding stream.
+- size_t size() const { return size_t(buf_ - start_); }
++ size_t size() const BMNOEXCEPT { return size_t(buf_ - start_); }
+
+ /// change current position
+- void seek(int delta) { buf_ += delta; }
++ void seek(int delta) BMNOEXCEPT { buf_ += delta; }
+
+ /// read bytes from the decode buffer
+- void memcpy(unsigned char* dst, size_t count);
++ void memcpy(unsigned char* dst, size_t count) BMNOEXCEPT;
+
+ /// Return current buffer pointer
+- const unsigned char* get_pos() const { return buf_; }
++ const unsigned char* get_pos() const BMNOEXCEPT { return buf_; }
+
+ /// Set current buffer pointer
+- void set_pos(const unsigned char* pos) { buf_ = pos; }
++ void set_pos(const unsigned char* pos) BMNOEXCEPT { buf_ = pos; }
+ protected:
+ const unsigned char* buf_;
+ const unsigned char* start_;
+@@ -117,16 +117,16 @@
+ class decoder : public decoder_base
+ {
+ public:
+- decoder(const unsigned char* buf);
+- bm::short_t get_16();
+- bm::word_t get_24();
+- bm::word_t get_32();
+- bm::id64_t get_48();
+- bm::id64_t get_64();
+- void get_32(bm::word_t* w, unsigned count);
+- bool get_32_OR(bm::word_t* w, unsigned count);
+- void get_32_AND(bm::word_t* w, unsigned count);
+- void get_16(bm::short_t* s, unsigned count);
++ decoder(const unsigned char* buf) BMNOEXCEPT;
++ bm::short_t get_16() BMNOEXCEPT;
++ bm::word_t get_24() BMNOEXCEPT;
++ bm::word_t get_32() BMNOEXCEPT;
++ bm::id64_t get_48() BMNOEXCEPT;
++ bm::id64_t get_64() BMNOEXCEPT;
++ void get_32(bm::word_t* w, unsigned count) BMNOEXCEPT;
++ bool get_32_OR(bm::word_t* w, unsigned count) BMNOEXCEPT;
++ void get_32_AND(bm::word_t* w, unsigned count) BMNOEXCEPT;
++ void get_16(bm::short_t* s, unsigned count) BMNOEXCEPT;
+ };
+
+ // ----------------------------------------------------------------
+@@ -181,23 +181,23 @@
+ ~bit_out() { flush(); }
+
+ /// issue single bit into encode bit-stream
+- void put_bit(unsigned value);
++ void put_bit(unsigned value) BMNOEXCEPT;
+
+ /// issue count bits out of value
+- void put_bits(unsigned value, unsigned count);
++ void put_bits(unsigned value, unsigned count) BMNOEXCEPT;
+
+ /// issue 0 into output stream
+- void put_zero_bit();
++ void put_zero_bit() BMNOEXCEPT;
+
+ /// issue specified number of 0s
+- void put_zero_bits(unsigned count);
++ void put_zero_bits(unsigned count) BMNOEXCEPT;
+
+ /// Elias Gamma encode the specified value
+- void gamma(unsigned value);
++ void gamma(unsigned value) BMNOEXCEPT;
+
+ /// Binary Interpolative array decode
+ void bic_encode_u16(const bm::gap_word_t* arr, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT
+ {
+ bic_encode_u16_cm(arr, sz, lo, hi);
+ }
+@@ -205,24 +205,24 @@
+ /// Binary Interpolative encoding (array of 16-bit ints)
+ void bic_encode_u16_rg(const bm::gap_word_t* arr, unsigned sz,
+ bm::gap_word_t lo,
+- bm::gap_word_t hi);
++ bm::gap_word_t hi) BMNOEXCEPT;
+
+ /// Binary Interpolative encoding (array of 16-bit ints)
+ /// cm - "center-minimal"
+ void bic_encode_u16_cm(const bm::gap_word_t* arr, unsigned sz,
+ bm::gap_word_t lo,
+- bm::gap_word_t hi);
++ bm::gap_word_t hi) BMNOEXCEPT;
+
+ /// Binary Interpolative encoding (array of 32-bit ints)
+ /// cm - "center-minimal"
+ void bic_encode_u32_cm(const bm::word_t* arr, unsigned sz,
+- bm::word_t lo, bm::word_t hi);
++ bm::word_t lo, bm::word_t hi) BMNOEXCEPT;
+
+ /// Flush the incomplete 32-bit accumulator word
+- void flush() { if (used_bits_) flush_accum(); }
++ void flush() BMNOEXCEPT { if (used_bits_) flush_accum(); }
+
+ private:
+- void flush_accum()
++ void flush_accum() BMNOEXCEPT
+ {
+ dest_.put_32(accum_);
+ used_bits_ = accum_ = 0;
+@@ -248,31 +248,32 @@
+ class bit_in
+ {
+ public:
+- bit_in(TDecoder& decoder)
++ bit_in(TDecoder& decoder) BMNOEXCEPT
+ : src_(decoder),
+ used_bits_(unsigned(sizeof(accum_) * 8)),
+- accum_(0)
++ accum_(0)
+ {}
+
+ /// decode unsigned value using Elias Gamma coding
+- unsigned gamma();
++ unsigned gamma() BMNOEXCEPT;
+
+ /// read number of bits out of the stream
+- unsigned get_bits(unsigned count);
++ unsigned get_bits(unsigned count) BMNOEXCEPT;
+
+ /// Binary Interpolative array decode
+ void bic_decode_u16(bm::gap_word_t* arr, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT
+ {
+ bic_decode_u16_cm(arr, sz, lo, hi);
+ }
+
+ void bic_decode_u16_bitset(bm::word_t* block, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT
+ {
+ bic_decode_u16_cm_bitset(block, sz, lo, hi);
+ }
+- void bic_decode_u16_dry(unsigned sz, bm::gap_word_t lo, bm::gap_word_t hi)
++ void bic_decode_u16_dry(unsigned sz,
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT
+ {
+ bic_decode_u16_cm_dry(sz, lo, hi);
+ }
+@@ -280,29 +281,32 @@
+
+ /// Binary Interpolative array decode
+ void bic_decode_u16_rg(bm::gap_word_t* arr, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi);
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT;
+ /// Binary Interpolative array decode
+ void bic_decode_u16_cm(bm::gap_word_t* arr, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi);
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT;
+
+ /// Binary Interpolative array decode (32-bit)
+ void bic_decode_u32_cm(bm::word_t* arr, unsigned sz,
+- bm::word_t lo, bm::word_t hi);
++ bm::word_t lo, bm::word_t hi) BMNOEXCEPT;
+
+
+ /// Binary Interpolative array decode into bitset (32-bit based)
+ void bic_decode_u16_rg_bitset(bm::word_t* block, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi);
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT;
+
+ /// Binary Interpolative array decode into /dev/null
+- void bic_decode_u16_rg_dry(unsigned sz, bm::gap_word_t lo, bm::gap_word_t hi);
++ void bic_decode_u16_rg_dry(unsigned sz,
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT;
+
+ /// Binary Interpolative array decode into bitset (32-bit based)
+ void bic_decode_u16_cm_bitset(bm::word_t* block, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi);
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT;
+
+ /// Binary Interpolative array decode into /dev/null
+- void bic_decode_u16_cm_dry(unsigned sz, bm::gap_word_t lo, bm::gap_word_t hi);
++ void bic_decode_u16_cm_dry(unsigned sz,
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT;
+
+ private:
+ bit_in(const bit_in&);
+@@ -377,7 +381,7 @@
+ \param buf - memory buffer pointer.
+ \param size - size of the buffer
+ */
+-inline encoder::encoder(unsigned char* buf, size_t a_size)
++inline encoder::encoder(unsigned char* buf, size_t a_size) BMNOEXCEPT
+ : buf_(buf), start_(buf)
+ {
+ size_ = a_size;
+@@ -387,7 +391,7 @@
+ */
+ inline void encoder::put_prefixed_array_32(unsigned char c,
+ const bm::word_t* w,
+- unsigned count)
++ unsigned count) BMNOEXCEPT
+ {
+ put_8(c);
+ put_32(w, count);
+@@ -399,7 +403,7 @@
+ inline void encoder::put_prefixed_array_16(unsigned char c,
+ const bm::short_t* s,
+ unsigned count,
+- bool encode_count)
++ bool encode_count) BMNOEXCEPT
+ {
+ put_8(c);
+ if (encode_count)
+@@ -413,7 +417,7 @@
+ \brief Puts one character into the encoding buffer.
+ \param c - character to encode
+ */
+-BMFORCEINLINE void encoder::put_8(unsigned char c)
++BMFORCEINLINE void encoder::put_8(unsigned char c) BMNOEXCEPT
+ {
+ *buf_++ = c;
+ }
+@@ -423,7 +427,7 @@
+ \brief Puts short word (16 bits) into the encoding buffer.
+ \param s - short word to encode
+ */
+-BMFORCEINLINE void encoder::put_16(bm::short_t s)
++BMFORCEINLINE void encoder::put_16(bm::short_t s) BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
+ ::memcpy(buf_, &s, sizeof(bm::short_t)); // optimizer takes care of it
+@@ -438,7 +442,7 @@
+ /*!
+ \brief Method puts array of short words (16 bits) into the encoding buffer.
+ */
+-inline void encoder::put_16(const bm::short_t* s, unsigned count)
++inline void encoder::put_16(const bm::short_t* s, unsigned count) BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
+ ::memcpy(buf_, s, sizeof(bm::short_t)*count);
+@@ -465,7 +469,7 @@
+ \brief copy bytes into target buffer or just rewind if src is NULL
+ */
+ inline
+-void encoder::memcpy(const unsigned char* src, size_t count)
++void encoder::memcpy(const unsigned char* src, size_t count) BMNOEXCEPT
+ {
+ BM_ASSERT((buf_ + count) < (start_ + size_));
+ if (src)
+@@ -478,7 +482,7 @@
+ \fn unsigned encoder::size() const
+ \brief Returns size of the current encoding stream.
+ */
+-inline size_t encoder::size() const
++inline size_t encoder::size() const BMNOEXCEPT
+ {
+ return size_t(buf_ - start_);
+ }
+@@ -486,7 +490,7 @@
+ /**
+ \brief Get current memory stream position
+ */
+-inline encoder::position_type encoder::get_pos() const
++inline encoder::position_type encoder::get_pos() const BMNOEXCEPT
+ {
+ return buf_;
+ }
+@@ -494,7 +498,7 @@
+ /**
+ \brief Set current memory stream position
+ */
+-inline void encoder::set_pos(encoder::position_type buf_pos)
++inline void encoder::set_pos(encoder::position_type buf_pos) BMNOEXCEPT
+ {
+ buf_ = buf_pos;
+ }
+@@ -504,7 +508,7 @@
+ \brief Puts 24 bits word into encoding buffer.
+ \param w - word to encode.
+ */
+-inline void encoder::put_24(bm::word_t w)
++inline void encoder::put_24(bm::word_t w) BMNOEXCEPT
+ {
+ BM_ASSERT((w & ~(0xFFFFFFU)) == 0);
+
+@@ -520,7 +524,7 @@
+ \brief Puts 32 bits word into encoding buffer.
+ \param w - word to encode.
+ */
+-inline void encoder::put_32(bm::word_t w)
++inline void encoder::put_32(bm::word_t w) BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
+ ::memcpy(buf_, &w, sizeof(bm::word_t));
+@@ -538,7 +542,7 @@
+ \brief Puts 48 bits word into encoding buffer.
+ \param w - word to encode.
+ */
+-inline void encoder::put_48(bm::id64_t w)
++inline void encoder::put_48(bm::id64_t w) BMNOEXCEPT
+ {
+ BM_ASSERT((w & ~(0xFFFFFFFFFFFFUL)) == 0);
+ *buf_++ = (unsigned char)w;
+@@ -555,7 +559,7 @@
+ \brief Puts 64 bits word into encoding buffer.
+ \param w - word to encode.
+ */
+-inline void encoder::put_64(bm::id64_t w)
++inline void encoder::put_64(bm::id64_t w) BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
+ ::memcpy(buf_, &w, sizeof(bm::id64_t));
+@@ -576,10 +580,10 @@
+ /*!
+ \brief Encodes array of 32-bit words
+ */
+-inline
+-void encoder::put_32(const bm::word_t* w, unsigned count)
++inline void encoder::put_32(const bm::word_t* w, unsigned count) BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
++ // use memcpy() because compilers now understand it as an idiom and inline
+ ::memcpy(buf_, w, sizeof(bm::word_t) * count);
+ buf_ += sizeof(bm::word_t) * count;
+ #else
+@@ -611,7 +615,7 @@
+ Load bytes from the decode buffer
+ */
+ inline
+-void decoder_base::memcpy(unsigned char* dst, size_t count)
++void decoder_base::memcpy(unsigned char* dst, size_t count) BMNOEXCEPT
+ {
+ if (dst)
+ ::memcpy(dst, buf_, count);
+@@ -623,7 +627,7 @@
+ \brief Construction
+ \param buf - pointer to the decoding memory.
+ */
+-inline decoder::decoder(const unsigned char* buf)
++inline decoder::decoder(const unsigned char* buf) BMNOEXCEPT
+ : decoder_base(buf)
+ {
+ }
+@@ -632,7 +636,7 @@
+ \fn bm::short_t decoder::get_16()
+ \brief Reads 16-bit word from the decoding buffer.
+ */
+-BMFORCEINLINE bm::short_t decoder::get_16()
++BMFORCEINLINE bm::short_t decoder::get_16() BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
+ bm::short_t a;
+@@ -648,7 +652,7 @@
+ \fn bm::word_t decoder::get_24()
+ \brief Reads 32-bit word from the decoding buffer.
+ */
+-inline bm::word_t decoder::get_24()
++inline bm::word_t decoder::get_24() BMNOEXCEPT
+ {
+ bm::word_t a = buf_[0] + ((unsigned)buf_[1] << 8) +
+ ((unsigned)buf_[2] << 16);
+@@ -661,7 +665,7 @@
+ \fn bm::word_t decoder::get_32()
+ \brief Reads 32-bit word from the decoding buffer.
+ */
+-BMFORCEINLINE bm::word_t decoder::get_32()
++BMFORCEINLINE bm::word_t decoder::get_32() BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
+ bm::word_t a;
+@@ -679,7 +683,7 @@
+ \brief Reads 64-bit word from the decoding buffer.
+ */
+ inline
+-bm::id64_t decoder::get_48()
++bm::id64_t decoder::get_48() BMNOEXCEPT
+ {
+ bm::id64_t a = buf_[0] +
+ ((bm::id64_t)buf_[1] << 8) +
+@@ -696,7 +700,7 @@
+ \brief Reads 64-bit word from the decoding buffer.
+ */
+ inline
+-bm::id64_t decoder::get_64()
++bm::id64_t decoder::get_64() BMNOEXCEPT
+ {
+ #if (BM_UNALIGNED_ACCESS_OK == 1)
+ bm::id64_t a;
+@@ -722,7 +726,7 @@
+ \param w - pointer on memory block to read into.
+ \param count - size of memory block in words.
+ */
+-inline void decoder::get_32(bm::word_t* w, unsigned count)
++inline void decoder::get_32(bm::word_t* w, unsigned count) BMNOEXCEPT
+ {
+ if (!w)
+ {
+@@ -754,7 +758,7 @@
+ \param count - should match bm::set_block_size
+ */
+ inline
+-bool decoder::get_32_OR(bm::word_t* w, unsigned count)
++bool decoder::get_32_OR(bm::word_t* w, unsigned count) BMNOEXCEPT
+ {
+ if (!w)
+ {
+@@ -795,7 +799,7 @@
+ \param count - should match bm::set_block_size
+ */
+ inline
+-void decoder::get_32_AND(bm::word_t* w, unsigned count)
++void decoder::get_32_AND(bm::word_t* w, unsigned count) BMNOEXCEPT
+ {
+ if (!w)
+ {
+@@ -833,7 +837,7 @@
+ \param s - pointer on memory block to read into.
+ \param count - size of memory block in words.
+ */
+-inline void decoder::get_16(bm::short_t* s, unsigned count)
++inline void decoder::get_16(bm::short_t* s, unsigned count) BMNOEXCEPT
+ {
+ if (!s)
+ {
+@@ -1004,7 +1008,7 @@
+ //
+
+ template<typename TEncoder>
+-void bit_out<TEncoder>::put_bit(unsigned value)
++void bit_out<TEncoder>::put_bit(unsigned value) BMNOEXCEPT
+ {
+ BM_ASSERT(value <= 1);
+ accum_ |= (value << used_bits_);
+@@ -1015,7 +1019,7 @@
+ // ----------------------------------------------------------------------
+
+ template<typename TEncoder>
+-void bit_out<TEncoder>::put_bits(unsigned value, unsigned count)
++void bit_out<TEncoder>::put_bits(unsigned value, unsigned count) BMNOEXCEPT
+ {
+ unsigned used = used_bits_;
+ unsigned acc = accum_;
+@@ -1057,7 +1061,7 @@
+ // ----------------------------------------------------------------------
+
+ template<typename TEncoder>
+-void bit_out<TEncoder>::put_zero_bit()
++void bit_out<TEncoder>::put_zero_bit() BMNOEXCEPT
+ {
+ if (++used_bits_ == (sizeof(accum_) * 8))
+ flush_accum();
+@@ -1066,7 +1070,7 @@
+ // ----------------------------------------------------------------------
+
+ template<typename TEncoder>
+-void bit_out<TEncoder>::put_zero_bits(unsigned count)
++void bit_out<TEncoder>::put_zero_bits(unsigned count) BMNOEXCEPT
+ {
+ unsigned used = used_bits_;
+ unsigned free_bits = (sizeof(accum_) * 8) - used;
+@@ -1096,7 +1100,7 @@
+ // ----------------------------------------------------------------------
+
+ template<typename TEncoder>
+-void bit_out<TEncoder>::gamma(unsigned value)
++void bit_out<TEncoder>::gamma(unsigned value) BMNOEXCEPT
+ {
+ BM_ASSERT(value);
+
+@@ -1168,9 +1172,10 @@
+ // ----------------------------------------------------------------------
+
+ template<typename TEncoder>
+-void bit_out<TEncoder>::bic_encode_u16_rg(const bm::gap_word_t* arr,
+- unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++void bit_out<TEncoder>::bic_encode_u16_rg(
++ const bm::gap_word_t* arr,
++ unsigned sz,
++ bm::gap_word_t lo, bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1204,7 +1209,8 @@
+ template<typename TEncoder>
+ void bit_out<TEncoder>::bic_encode_u32_cm(const bm::word_t* arr,
+ unsigned sz,
+- bm::word_t lo, bm::word_t hi)
++ bm::word_t lo,
++ bm::word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1292,7 +1298,7 @@
+ void bit_out<TEncoder>::bic_encode_u16_cm(const bm::gap_word_t* arr,
+ unsigned sz_i,
+ bm::gap_word_t lo_i,
+- bm::gap_word_t hi_i)
++ bm::gap_word_t hi_i) BMNOEXCEPT
+ {
+ BM_ASSERT(sz_i <= 65535);
+
+@@ -1329,7 +1335,8 @@
+ template<typename TEncoder>
+ void bit_out<TEncoder>::bic_encode_u16_cm(const bm::gap_word_t* arr,
+ unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1379,7 +1386,8 @@
+
+ template<class TDecoder>
+ void bit_in<TDecoder>::bic_decode_u16_rg(bm::gap_word_t* arr, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1421,7 +1429,8 @@
+
+ template<class TDecoder>
+ void bit_in<TDecoder>::bic_decode_u32_cm(bm::word_t* arr, unsigned sz,
+- bm::word_t lo, bm::word_t hi)
++ bm::word_t lo,
++ bm::word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1472,7 +1481,8 @@
+
+ template<class TDecoder>
+ void bit_in<TDecoder>::bic_decode_u16_cm(bm::gap_word_t* arr, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1523,7 +1533,8 @@
+
+ template<class TDecoder>
+ void bit_in<TDecoder>::bic_decode_u16_cm_bitset(bm::word_t* block, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1579,7 +1590,8 @@
+
+ template<class TDecoder>
+ void bit_in<TDecoder>::bic_decode_u16_cm_dry(unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1630,7 +1642,8 @@
+
+ template<class TDecoder>
+ void bit_in<TDecoder>::bic_decode_u16_rg_bitset(bm::word_t* block, unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1665,7 +1678,7 @@
+ if (sz == 1)
+ return;
+ bic_decode_u16_rg_bitset(block, mid_idx, lo, bm::gap_word_t(val - 1));
+- // tail recursion:
++ // tail recursion of:
+ //bic_decode_u16_bitset(block, sz - mid_idx - 1, bm::gap_word_t(val + 1), hi);
+ sz -= mid_idx + 1;
+ lo = bm::gap_word_t(val + 1);
+@@ -1676,7 +1689,8 @@
+
+ template<class TDecoder>
+ void bit_in<TDecoder>::bic_decode_u16_rg_dry(unsigned sz,
+- bm::gap_word_t lo, bm::gap_word_t hi)
++ bm::gap_word_t lo,
++ bm::gap_word_t hi) BMNOEXCEPT
+ {
+ for (;sz;)
+ {
+@@ -1705,7 +1719,6 @@
+ if (sz == 1)
+ return;
+ bic_decode_u16_rg_dry(mid_idx, lo, bm::gap_word_t(val - 1));
+- //bic_decode_u16_dry(sz - mid_idx - 1, bm::gap_word_t(val + 1), hi);
+ sz -= mid_idx + 1;
+ lo = bm::gap_word_t(val + 1);
+ } // for sz
+@@ -1716,7 +1729,7 @@
+ // ----------------------------------------------------------------------
+
+ template<class TDecoder>
+-unsigned bit_in<TDecoder>::gamma()
++unsigned bit_in<TDecoder>::gamma() BMNOEXCEPT
+ {
+ unsigned acc = accum_;
+ unsigned used = used_bits_;
+@@ -1801,7 +1814,7 @@
+ // ----------------------------------------------------------------------
+
+ template<class TDecoder>
+-unsigned bit_in<TDecoder>::get_bits(unsigned count)
++unsigned bit_in<TDecoder>::get_bits(unsigned count) BMNOEXCEPT
+ {
+ BM_ASSERT(count);
+ const unsigned maskFF = ~0u;
+Index: c++/include/util/bitset/bmxor.h
+===================================================================
+--- a/c++/include/util/bitset/bmxor.h (revision 90103)
++++ b/c++/include/util/bitset/bmxor.h (revision 90104)
+@@ -36,7 +36,7 @@
+ inline
+ unsigned bit_block_xor_change32(const bm::word_t* BMRESTRICT block,
+ const bm::word_t* BMRESTRICT xor_block,
+- unsigned size)
++ unsigned size) BMNOEXCEPT
+ {
+ unsigned gap_count = 1;
+
+@@ -80,7 +80,7 @@
+ inline
+ unsigned bit_block_xor_change(const bm::word_t* BMRESTRICT block,
+ const bm::word_t* BMRESTRICT xor_block,
+- unsigned size)
++ unsigned size) BMNOEXCEPT
+ {
+ #ifdef VECT_BLOCK_XOR_CHANGE
+ return VECT_BLOCK_XOR_CHANGE(block, xor_block, size);
+@@ -112,7 +112,7 @@
+ inline
+ void compute_complexity_descr(
+ const bm::word_t* BMRESTRICT block,
+- block_waves_xor_descr& BMRESTRICT x_descr)
++ block_waves_xor_descr& BMRESTRICT x_descr) BMNOEXCEPT
+ {
+ for (unsigned i = 0; i < bm::block_waves; ++i)
+ {
+@@ -146,7 +146,7 @@
+ const bm::word_t* BMRESTRICT block,
+ const bm::word_t* BMRESTRICT xor_block,
+ block_waves_xor_descr& BMRESTRICT x_descr,
+- unsigned& block_gain)
++ unsigned& BMRESTRICT block_gain) BMNOEXCEPT
+ {
+ block_gain = 0; // approximate block gain (sum of sub-waves)
+ bm::id64_t digest = 0;
+@@ -191,7 +191,7 @@
+ inline
+ void bit_block_xor(bm::word_t* target_block,
+ const bm::word_t* block, const bm::word_t* xor_block,
+- bm::id64_t digest)
++ bm::id64_t digest) BMNOEXCEPT
+ {
+ BM_ASSERT(target_block);
+ BM_ASSERT(block);
+@@ -267,21 +267,23 @@
+ }
+
+ /// Get reference list size
+- size_type size() const { return (size_type)ref_bvects_.size(); }
++ size_type size() const BMNOEXCEPT { return (size_type)ref_bvects_.size(); }
+
+ /// Get reference vector by the index in this ref-vector
+- const bvector_type* get_bv(size_type idx) const { return ref_bvects_[idx]; }
++ const bvector_type* get_bv(size_type idx) const BMNOEXCEPT
++ { return ref_bvects_[idx]; }
+
+ /// Get reference row index by the index in this ref-vector
+- size_type get_row_idx(size_type idx) const { return (size_type)ref_bvects_rows_[idx]; }
++ size_type get_row_idx(size_type idx) const BMNOEXCEPT
++ { return (size_type)ref_bvects_rows_[idx]; }
+
+ /// not-found value for find methods
+ static
+- size_type not_found() { return ~(size_type(0)); }
++ size_type not_found() BMNOEXCEPT { return ~(size_type(0)); }
+
+ /// Find vector index by the reference index
+ /// @return ~0 if not found
+- size_type find(std::size_t ref_idx) const
++ size_type find(std::size_t ref_idx) const BMNOEXCEPT
+ {
+ size_type sz = size();
+ for (size_type i = 0; i < sz; ++i)
+@@ -333,13 +335,16 @@
+ typedef typename bvector_type::size_type size_type;
+
+ public:
+- void set_ref_vector(const bv_ref_vector_type* ref_vect) { ref_vect_ = ref_vect; }
+- const bv_ref_vector_type& get_ref_vector() const { return *ref_vect_; }
++ void set_ref_vector(const bv_ref_vector_type* ref_vect) BMNOEXCEPT
++ { ref_vect_ = ref_vect; }
+
++ const bv_ref_vector_type& get_ref_vector() const BMNOEXCEPT
++ { return *ref_vect_; }
++
+ /** Compute statistics for the anchor search vector
+ @param block - bit-block target
+ */
+- void compute_x_block_stats(const bm::word_t* block);
++ void compute_x_block_stats(const bm::word_t* block) BMNOEXCEPT;
+
+ /** Scan for all candidate bit-blocks to find mask or match
+ @return true if XOR complement or matching vector found
+@@ -360,23 +365,26 @@
+ /**
+ Validate serialization target
+ */
+- bool validate_found(bm::word_t* xor_block, const bm::word_t* block) const;
++ bool validate_found(bm::word_t* xor_block,
++ const bm::word_t* block) const BMNOEXCEPT;
+
+- size_type found_ridx() const { return found_ridx_; }
+- const bm::word_t* get_found_block() const { return found_block_xor_; }
+- unsigned get_x_best_metric() const { return x_best_metric_; }
+- bm::id64_t get_xor_digest() const { return x_d64_; }
++ size_type found_ridx() const BMNOEXCEPT { return found_ridx_; }
++ const bm::word_t* get_found_block() const BMNOEXCEPT
++ { return found_block_xor_; }
++ unsigned get_x_best_metric() const BMNOEXCEPT { return x_best_metric_; }
++ bm::id64_t get_xor_digest() const BMNOEXCEPT { return x_d64_; }
+
+ /// true if completely identical vector found
+- bool is_eq_found() const { return !x_best_metric_; }
++ bool is_eq_found() const BMNOEXCEPT { return !x_best_metric_; }
+
+
+- unsigned get_x_bc() const { return x_bc_; }
+- unsigned get_x_gc() const { return x_gc_; }
+- unsigned get_x_block_best() const { return x_block_best_metric_; }
++ unsigned get_x_bc() const BMNOEXCEPT { return x_bc_; }
++ unsigned get_x_gc() const BMNOEXCEPT { return x_gc_; }
++ unsigned get_x_block_best() const BMNOEXCEPT
++ { return x_block_best_metric_; }
+
+
+- bm::block_waves_xor_descr& get_descr() { return x_descr_; }
++ bm::block_waves_xor_descr& get_descr() BMNOEXCEPT { return x_descr_; }
+
+ private:
+ const bv_ref_vector_type* ref_vect_ = 0; ///< ref.vect for XOR filter
+@@ -400,7 +408,7 @@
+ // --------------------------------------------------------------------------
+
+ template<typename BV>
+-void xor_scanner<BV>::compute_x_block_stats(const bm::word_t* block)
++void xor_scanner<BV>::compute_x_block_stats(const bm::word_t* block) BMNOEXCEPT
+ {
+ BM_ASSERT(IS_VALID_ADDR(block));
+ BM_ASSERT(!BM_IS_GAP(block));
+@@ -439,7 +447,8 @@
+ {
+ const bvector_type* bv = ref_vect_->get_bv(ri);
+ BM_ASSERT(bv);
+- const typename bvector_type::blocks_manager_type& bman = bv->get_blocks_manager();
++ const typename bvector_type::blocks_manager_type& bman =
++ bv->get_blocks_manager();
+ const bm::word_t* block_xor = bman.get_block_ptr(i, j);
+ if (!IS_VALID_ADDR(block_xor) || BM_IS_GAP(block_xor))
+ continue;
+@@ -487,7 +496,7 @@
+ if (!xor_bc) // completely identical block?
+ {
+ unsigned pos;
+- bool f = bit_find_first_diff(block, block_xor, &pos);
++ bool f = bm::bit_find_first_diff(block, block_xor, &pos);
+ x_best_metric_ += f;
+ }
+ }
+@@ -555,7 +564,7 @@
+
+ template<typename BV>
+ bool xor_scanner<BV>::validate_found(bm::word_t* xor_block,
+- const bm::word_t* block) const
++ const bm::word_t* block) const BMNOEXCEPT
+ {
+ bm::id64_t d64 = get_xor_digest();
+ BM_ASSERT(d64);
+Index: c++/include/util/bitset/bmsse2.h
+===================================================================
+--- a/c++/include/util/bitset/bmsse2.h (revision 90103)
++++ b/c++/include/util/bitset/bmsse2.h (revision 90104)
+@@ -398,6 +398,74 @@
+ }
+ return size;
+ }
++
++/**
++ Hybrid binary search, starts as binary, then switches to linear scan
++
++ \param buf - GAP buffer pointer.
++ \param pos - index of the element.
++ \param is_set - output. GAP value (0 or 1).
++ \return GAP index.
++
++ @ingroup SSE2
++*/
++inline
++unsigned sse2_gap_bfind(const unsigned short* BMRESTRICT buf,
++ unsigned pos, unsigned* BMRESTRICT is_set)
++{
++ unsigned start = 1;
++ unsigned end = 1 + ((*buf) >> 3);
++ unsigned dsize = end - start;
++
++ if (dsize < 17)
++ {
++ start = bm::sse2_gap_find(buf+1, (bm::gap_word_t)pos, dsize);
++ *is_set = ((*buf) & 1) ^ (start & 1);
++ BM_ASSERT(buf[start+1] >= pos);
++ BM_ASSERT(buf[start] < pos || (start==0));
++
++ return start+1;
++ }
++ unsigned arr_end = end;
++ while (start != end)
++ {
++ unsigned curr = (start + end) >> 1;
++ if (buf[curr] < pos)
++ start = curr + 1;
++ else
++ end = curr;
++
++ unsigned size = end - start;
++ if (size < 16)
++ {
++ size += (end != arr_end);
++ unsigned idx =
++ bm::sse2_gap_find(buf + start, (bm::gap_word_t)pos, size);
++ start += idx;
++
++ BM_ASSERT(buf[start] >= pos);
++ BM_ASSERT(buf[start - 1] < pos || (start == 1));
++ break;
++ }
++ }
++
++ *is_set = ((*buf) & 1) ^ ((start-1) & 1);
++ return start;
++}
++
++/**
++ Hybrid binary search, starts as binary, then switches to scan
++ @ingroup SSE2
++*/
++inline
++unsigned sse2_gap_test(const unsigned short* BMRESTRICT buf, unsigned pos)
++{
++ unsigned is_set;
++ bm::sse2_gap_bfind(buf, pos, &is_set);
++ return is_set;
++}
++
++
+ #ifdef __GNUG__
+ #pragma GCC diagnostic pop
+ #endif
+@@ -460,6 +528,8 @@
+ #define VECT_SET_BLOCK(dst, value) \
+ sse2_set_block((__m128i*) dst, value)
+
++#define VECT_GAP_BFIND(buf, pos, is_set) \
++ sse2_gap_bfind(buf, pos, is_set)
+
+
+ } // namespace
+Index: c++/include/util/bitset/bmblocks.h
+===================================================================
+--- a/c++/include/util/bitset/bmblocks.h (revision 90103)
++++ b/c++/include/util/bitset/bmblocks.h (revision 90104)
+@@ -59,10 +59,10 @@
+ public:
+ typedef id_type size_type;
+
+- bm_func_base(blocks_manager& bman) : bm_(bman) {}
++ bm_func_base(blocks_manager& bman) BMNOEXCEPT : bm_(bman) {}
+
+- void on_empty_top(unsigned /* top_block_idx*/ ) {}
+- void on_empty_block(block_idx_type /* block_idx*/ ) {}
++ void on_empty_top(unsigned /* top_block_idx*/ ) BMNOEXCEPT {}
++ void on_empty_block(block_idx_type /* block_idx*/ ) BMNOEXCEPT {}
+ private:
+ bm_func_base(const bm_func_base&);
+ bm_func_base& operator=(const bm_func_base&);
+@@ -76,13 +76,13 @@
+ {
+ public:
+ typedef id_type size_type;
+- bm_func_base_const(const blocks_manager& bman) : bm_(bman) {}
++ bm_func_base_const(const blocks_manager& bman) BMNOEXCEPT : bm_(bman) {}
+
+- void on_empty_top(unsigned /* top_block_idx*/ ) {}
+- void on_empty_block(block_idx_type /* block_idx*/ ) {}
++ void on_empty_top(unsigned /* top_block_idx*/ ) BMNOEXCEPT {}
++ void on_empty_block(block_idx_type /* block_idx*/ ) BMNOEXCEPT {}
+ private:
+- bm_func_base_const(const bm_func_base_const&);
+- bm_func_base_const& operator=(const bm_func_base_const&);
++ bm_func_base_const(const bm_func_base_const&) BMNOEXCEPT;
++ bm_func_base_const& operator=(const bm_func_base_const&) BMNOEXCEPT;
+ protected:
+ const blocks_manager& bm_;
+ };
+@@ -92,10 +92,10 @@
+ class block_count_base : public bm_func_base_const
+ {
+ protected:
+- block_count_base(const blocks_manager& bm)
++ block_count_base(const blocks_manager& bm) BMNOEXCEPT
+ : bm_func_base_const(bm) {}
+
+- bm::id_t block_count(const bm::word_t* block) const
++ bm::id_t block_count(const bm::word_t* block) const BMNOEXCEPT
+ {
+ return this->bm_.block_bitcount(block);
+ }
+@@ -108,17 +108,17 @@
+ public:
+ typedef id_type size_type;
+
+- block_count_func(const blocks_manager& bm)
++ block_count_func(const blocks_manager& bm) BMNOEXCEPT
+ : block_count_base(bm), count_(0) {}
+
+- id_type count() const { return count_; }
++ id_type count() const BMNOEXCEPT { return count_; }
+
+- void operator()(const bm::word_t* block)
++ void operator()(const bm::word_t* block) BMNOEXCEPT
+ {
+ count_ += this->block_count(block);
+ }
+- void add_full(id_type c) { count_ += c; }
+- void reset() { count_ = 0; }
++ void add_full(id_type c) BMNOEXCEPT { count_ += c; }
++ void reset() BMNOEXCEPT { count_ = 0; }
+
+ private:
+ id_type count_;
+@@ -131,24 +131,22 @@
+ public:
+ typedef id_type size_type;
+
+- block_count_arr_func(const blocks_manager& bm, unsigned* arr)
++ block_count_arr_func(const blocks_manager& bm, unsigned* arr) BMNOEXCEPT
+ : block_count_base(bm), arr_(arr), last_idx_(0)
+ {
+ arr_[0] = 0;
+ }
+
+- void operator()(const bm::word_t* block, id_type idx)
++ void operator()(const bm::word_t* block, id_type idx) BMNOEXCEPT
+ {
+ while (++last_idx_ < idx)
+- {
+ arr_[last_idx_] = 0;
+- }
+ arr_[idx] = this->block_count(block);
+ last_idx_ = idx;
+ }
+
+- id_type last_block() const { return last_idx_; }
+- void on_non_empty_top(unsigned) {}
++ id_type last_block() const BMNOEXCEPT { return last_idx_; }
++ void on_non_empty_top(unsigned) BMNOEXCEPT {}
+
+ private:
+ unsigned* arr_;
+@@ -161,13 +159,14 @@
+ public:
+ typedef id_type size_type;
+
+- block_count_change_func(const blocks_manager& bm)
++ block_count_change_func(const blocks_manager& bm) BMNOEXCEPT
+ : bm_func_base_const(bm),
+ count_(0),
+ prev_block_border_bit_(0)
+ {}
+
+- block_idx_type block_count(const bm::word_t* block, block_idx_type idx)
++ block_idx_type block_count(const bm::word_t* block,
++ block_idx_type idx) BMNOEXCEPT
+ {
+ block_idx_type cnt = 0;
+ id_type first_bit;
+@@ -187,7 +186,7 @@
+ if (BM_IS_GAP(block))
+ {
+ gap_word_t* gap_block = BMGAP_PTR(block);
+- cnt = gap_length(gap_block) - 1;
++ cnt = bm::gap_length(gap_block) - 1;
+ if (idx)
+ {
+ first_bit = bm::gap_test_unr(gap_block, 0);
+@@ -213,9 +212,9 @@
+ return cnt;
+ }
+
+- id_type count() const { return count_; }
++ id_type count() const BMNOEXCEPT { return count_; }
+
+- void operator()(const bm::word_t* block, block_idx_type idx)
++ void operator()(const bm::word_t* block, block_idx_type idx) BMNOEXCEPT
+ {
+ count_ += block_count(block, idx);
+ }
+@@ -232,11 +231,12 @@
+ public:
+ typedef id_type size_type;
+
+- block_any_func(const blocks_manager& bm)
++ block_any_func(const blocks_manager& bm) BMNOEXCEPT
+ : bm_func_base_const(bm)
+ {}
+
+- bool operator()(const bm::word_t* block, block_idx_type /*idx*/)
++ bool operator()
++ (const bm::word_t* block, block_idx_type /*idx*/) BMNOEXCEPT
+ {
+ if (BM_IS_GAP(block)) // gap block
+ return (!gap_is_all_zero(BMGAP_PTR(block)));
+@@ -250,9 +250,9 @@
+ class gap_level_func : public bm_func_base
+ {
+ public:
+- gap_level_func(blocks_manager& bm, const gap_word_t* glevel_len)
+- : bm_func_base(bm),
+- glevel_len_(glevel_len)
++ gap_level_func(blocks_manager& bm,
++ const gap_word_t* glevel_len) BMNOEXCEPT
++ : bm_func_base(bm), glevel_len_(glevel_len)
+ {
+ BM_ASSERT(glevel_len);
+ }
+@@ -282,12 +282,11 @@
+ return;
+ }
+
+- unsigned len = gap_length(gap_blk);
+- int level = gap_calc_level(len, glevel_len_);
++ unsigned len = bm::gap_length(gap_blk);
++ int level = bm::gap_calc_level(len, glevel_len_);
+ if (level == -1)
+ {
+- bm::word_t* blk =
+- bman.get_allocator().alloc_bit_block();
++ bm::word_t* blk = bman.get_allocator().alloc_bit_block();
+ bman.set_block_ptr(idx, blk);
+ bm::gap_convert_to_bitset(blk, gap_blk);
+ }
+@@ -294,7 +293,7 @@
+ else
+ {
+ gap_word_t* gap_blk_new =
+- bman.allocate_gap_block(unsigned(level), gap_blk, glevel_len_);
++ bman.allocate_gap_block(unsigned(level), gap_blk, glevel_len_);
+
+ bm::word_t* p = (bm::word_t*) gap_blk_new;
+ BMSET_PTRGAP(p);
+@@ -312,7 +311,7 @@
+ class block_one_func : public bm_func_base
+ {
+ public:
+- block_one_func(blocks_manager& bm) : bm_func_base(bm) {}
++ block_one_func(blocks_manager& bm) BMNOEXCEPT : bm_func_base(bm) {}
+
+ void operator()(bm::word_t* block, block_idx_type idx)
+ {
+@@ -357,7 +356,7 @@
+ }
+
+ #ifndef BM_NO_CXX11
+- blocks_manager(blocks_manager&& blockman) BMNOEXEPT
++ blocks_manager(blocks_manager&& blockman) BMNOEXCEPT
+ : max_bits_(blockman.max_bits_),
+ top_blocks_(0),
+ top_block_size_(blockman.top_block_size_),
+@@ -369,7 +368,7 @@
+ }
+ #endif
+
+- ~blocks_manager() BMNOEXEPT
++ ~blocks_manager() BMNOEXCEPT
+ {
+ if (temp_block_)
+ alloc_.free_bit_block(temp_block_);
+@@ -379,7 +378,7 @@
+ /*! \brief Swaps content
+ \param bm another blocks manager
+ */
+- void swap(blocks_manager& bm) BMNOEXEPT
++ void swap(blocks_manager& bm) BMNOEXCEPT
+ {
+ BM_ASSERT(this != &bm);
+
+@@ -399,7 +398,7 @@
+
+ /*! \brief implementation of moving semantics
+ */
+- void move_from(blocks_manager& bm) BMNOEXEPT
++ void move_from(blocks_manager& bm) BMNOEXCEPT
+ {
+ deinit_tree();
+ swap(bm);
+@@ -412,9 +411,9 @@
+ }
+
+
+- void free_ptr(bm::word_t** ptr)
++ void free_ptr(bm::word_t** ptr) BMNOEXCEPT
+ {
+- if (ptr) alloc_.free_ptr(ptr);
++ alloc_.free_ptr(ptr);
+ }
+
+ /**
+@@ -422,7 +421,7 @@
+ \param bits_to_store - supposed capacity (number of bits)
+ \return size of the top level block
+ */
+- unsigned compute_top_block_size(id_type bits_to_store)
++ unsigned compute_top_block_size(id_type bits_to_store) const BMNOEXCEPT
+ {
+ if (bits_to_store >= bm::id_max) // working in full-range mode
+ return bm::set_top_array_size;
+@@ -456,7 +455,8 @@
+ \param no_more_blocks - 1 if there are no more blocks at all
+ \return block adress or NULL if not yet allocated
+ */
+- bm::word_t* get_block(block_idx_type nb, int* no_more_blocks) const
++ const bm::word_t*
++ get_block(block_idx_type nb, int* no_more_blocks) const BMNOEXCEPT
+ {
+ BM_ASSERT(top_blocks_);
+ unsigned i = unsigned(nb >> bm::set_array_shift);
+@@ -489,7 +489,7 @@
+ @return bm::set_total_blocks - no more blocks
+ */
+ block_idx_type
+- find_next_nz_block(block_idx_type nb, bool deep_scan = true) const
++ find_next_nz_block(block_idx_type nb, bool deep_scan=true) const BMNOEXCEPT
+ {
+ if (is_init())
+ {
+@@ -521,7 +521,7 @@
+ \param j - second level block index
+ \return block adress or NULL if not yet allocated
+ */
+- const bm::word_t* get_block(unsigned i, unsigned j) const
++ const bm::word_t* get_block(unsigned i, unsigned j) const BMNOEXCEPT
+ {
+ if (!top_blocks_ || i >= top_block_size_) return 0;
+ const bm::word_t* const* blk_blk = top_blocks_[i];
+@@ -537,7 +537,7 @@
+ \param j - second level block index
+ \return block adress or NULL if not yet allocated
+ */
+- const bm::word_t* get_block_ptr(unsigned i, unsigned j) const
++ const bm::word_t* get_block_ptr(unsigned i, unsigned j) const BMNOEXCEPT
+ {
+ if (!top_blocks_ || i >= top_block_size_) return 0;
+
+@@ -553,9 +553,10 @@
+ \param j - second level block index
+ \return block adress or NULL if not yet allocated
+ */
+- bm::word_t* get_block_ptr(unsigned i, unsigned j)
++ bm::word_t* get_block_ptr(unsigned i, unsigned j) BMNOEXCEPT
+ {
+- if (!top_blocks_ || i >= top_block_size_) return 0;
++ if (!top_blocks_ || i >= top_block_size_)
++ return 0;
+ bm::word_t* const* blk_blk = top_blocks_[i];
+ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
+ return FULL_BLOCK_FAKE_ADDR;
+@@ -569,7 +570,7 @@
+ \param i - top level block index
+ \return block adress or NULL if not yet allocated
+ */
+- const bm::word_t* const * get_topblock(unsigned i) const
++ const bm::word_t* const * get_topblock(unsigned i) const BMNOEXCEPT
+ {
+ return (!top_blocks_ || i >= top_block_size_) ? 0 : top_blocks_[i];
+ }
+@@ -577,7 +578,7 @@
+ /**
+ \brief Returns root block in the tree.
+ */
+- bm::word_t*** top_blocks_root() const
++ bm::word_t*** top_blocks_root() const BMNOEXCEPT
+ {
+ blocks_manager* bm =
+ const_cast<blocks_manager*>(this);
+@@ -837,7 +838,7 @@
+ {
+ gap_res = true;
+ new_block = (bm::word_t*)
+- get_allocator().alloc_gap_block(unsigned(new_level), glen());
++ get_allocator().alloc_gap_block(unsigned(new_level), glen());
+ ::memcpy(new_block, gap_block, len * sizeof(bm::gap_word_t));
+ bm::set_gap_level(new_block, new_level);
+ }
+@@ -1124,7 +1125,7 @@
+ /*! @brief Fills all blocks with 0.
+ @param free_mem - if true function frees the resources (obsolete)
+ */
+- void set_all_zero(bool /*free_mem*/)
++ void set_all_zero(bool /*free_mem*/) BMNOEXCEPT
+ {
+ if (!is_init()) return;
+ deinit_tree(); // TODO: optimization of top-level realloc
+@@ -1141,7 +1142,7 @@
+ bm::set_sub_array_size, func);
+ }
+
+- void free_top_subblock(unsigned nblk_blk)
++ void free_top_subblock(unsigned nblk_blk) BMNOEXCEPT
+ {
+ BM_ASSERT(top_blocks_[nblk_blk]);
+ if ((bm::word_t*)top_blocks_[nblk_blk] != FULL_BLOCK_FAKE_ADDR)
+@@ -1452,7 +1453,7 @@
+ Places new block into blocks table.
+ */
+ BMFORCEINLINE
+- void set_block_ptr(unsigned i, unsigned j, bm::word_t* block)
++ void set_block_ptr(unsigned i, unsigned j, bm::word_t* block) BMNOEXCEPT
+ {
+ BM_ASSERT(is_init());
+ BM_ASSERT(i < top_block_size_);
+@@ -1597,7 +1598,7 @@
+ /**
+ Free block, make it zero pointer in the tree
+ */
+- void zero_gap_block_ptr(unsigned i, unsigned j)
++ void zero_gap_block_ptr(unsigned i, unsigned j) BMNOEXCEPT
+ {
+ BM_ASSERT(top_blocks_ && i < top_block_size_);
+
+@@ -1616,19 +1617,15 @@
+ Count number of bits ON in the block
+ */
+ static
+- bm::id_t block_bitcount(const bm::word_t* block)
++ bm::id_t block_bitcount(const bm::word_t* block) BMNOEXCEPT
+ {
+ BM_ASSERT(block);
+ id_t count;
+ if (BM_IS_GAP(block))
+- {
+ count = bm::gap_bit_count_unr(BMGAP_PTR(block));
+- }
+ else // bitset
+- {
+ count = (IS_FULL_BLOCK(block)) ? bm::bits_in_block
+ : bm::bit_block_count(block);
+- }
+ return count;
+ }
+
+@@ -1678,7 +1675,7 @@
+ }
+
+ /*! deallocate temp block */
+- void free_temp_block()
++ void free_temp_block() BMNOEXCEPT
+ {
+ if (temp_block_)
+ {
+@@ -1686,6 +1683,7 @@
+ temp_block_ = 0;
+ }
+ }
++
+ /*! Detach and return temp block.
+ if temp block is NULL allocates a bit-block
+ caller is responsible for returning
+@@ -1705,7 +1703,7 @@
+ /*! Return temp block
+ if temp block already exists - block gets deallocated
+ */
+- void return_tempblock(bm::word_t* block)
++ void return_tempblock(bm::word_t* block) BMNOEXCEPT
+ {
+ BM_ASSERT(block != temp_block_);
+ BM_ASSERT(IS_VALID_ADDR(block));
+@@ -1717,7 +1715,7 @@
+ }
+
+ /*! Assigns new GAP lengths vector */
+- void set_glen(const gap_word_t* glevel_len)
++ void set_glen(const gap_word_t* glevel_len) BMNOEXCEPT
+ {
+ ::memcpy(glevel_len_, glevel_len, sizeof(glevel_len_));
+ }
+@@ -1745,7 +1743,7 @@
+
+ /** Returns true if second level block pointer is 0.
+ */
+- bool is_subblock_null(unsigned nsub) const
++ bool is_subblock_null(unsigned nsub) const BMNOEXCEPT
+ {
+ BM_ASSERT(top_blocks_);
+ if (nsub >= top_block_size_)
+@@ -1753,7 +1751,7 @@
+ return top_blocks_[nsub] == NULL;
+ }
+
+- bm::word_t*** top_blocks_root()
++ bm::word_t*** top_blocks_root() BMNOEXCEPT
+ {
+ return top_blocks_;
+ }
+@@ -1760,7 +1758,7 @@
+
+ /*! \brief Returns current GAP level vector
+ */
+- const gap_word_t* glen() const
++ const gap_word_t* glen() const BMNOEXCEPT
+ {
+ return glevel_len_;
+ }
+@@ -1768,7 +1766,7 @@
+ /*! \brief Returns GAP level length for specified level
+ \param level - level number
+ */
+- unsigned glen(unsigned level) const
++ unsigned glen(unsigned level) const BMNOEXCEPT
+ {
+ return glevel_len_[level];
+ }
+@@ -1775,7 +1773,7 @@
+
+ /*! \brief Returns size of the top block array in the tree
+ */
+- unsigned top_block_size() const
++ unsigned top_block_size() const BMNOEXCEPT
+ {
+ return top_block_size_;
+ }
+@@ -1829,21 +1827,20 @@
+
+ /** \brief Returns reference on the allocator
+ */
+- allocator_type& get_allocator() { return alloc_; }
++ allocator_type& get_allocator() BMNOEXCEPT { return alloc_; }
+
+ /** \brief Returns allocator
+ */
+- allocator_type get_allocator() const { return alloc_; }
++ allocator_type get_allocator() const BMNOEXCEPT { return alloc_; }
+
+
+ /// if tree of blocks already up
+- bool is_init() const { return top_blocks_ != 0; }
++ bool is_init() const BMNOEXCEPT { return top_blocks_ != 0; }
+
+ /// allocate first level of descr. of blocks
+ void init_tree()
+ {
+ BM_ASSERT(top_blocks_ == 0);
+-
+ if (top_block_size_)
+ {
+ top_blocks_ = (bm::word_t***) alloc_.alloc_ptr(top_block_size_);
+@@ -1865,7 +1862,7 @@
+ alloc_.free_bit_block(blk); \
+ }
+
+- void deallocate_top_subblock(unsigned nblk_blk)
++ void deallocate_top_subblock(unsigned nblk_blk) BMNOEXCEPT
+ {
+ if (!top_blocks_[nblk_blk])
+ return;
+@@ -1907,7 +1904,7 @@
+ /** destroy tree, free memory in all blocks and control structures
+ Note: pointers are NOT assigned to zero(!)
+ */
+- void destroy_tree() BMNOEXEPT
++ void destroy_tree() BMNOEXCEPT
+ {
+ if (!top_blocks_)
+ return;
+@@ -1937,7 +1934,7 @@
+ }
+ #undef BM_FREE_OP
+
+- void deinit_tree() BMNOEXEPT
++ void deinit_tree() BMNOEXCEPT
+ {
+ destroy_tree();
+ top_blocks_ = 0; top_block_size_ = 0;
+@@ -1946,7 +1943,7 @@
+ // ----------------------------------------------------------------
+
+ /// calculate top blocks which are not NULL and not FULL
+- unsigned find_real_top_blocks() const
++ unsigned find_real_top_blocks() const BMNOEXCEPT
+ {
+ unsigned cnt = 0;
+ unsigned top_blocks = top_block_size();
+@@ -1964,7 +1961,7 @@
+ // ----------------------------------------------------------------
+
+ /// calculate max top blocks size whithout NULL-tail
+- unsigned find_max_top_blocks() const
++ unsigned find_max_top_blocks() const BMNOEXCEPT
+ {
+ unsigned top_blocks = top_block_size();
+ if (!top_blocks)
+@@ -1981,11 +1978,11 @@
+
+ // ----------------------------------------------------------------
+
+- void validate_top_zero(unsigned i)
++ void validate_top_zero(unsigned i) BMNOEXCEPT
+ {
+ BM_ASSERT(i < top_block_size());
+ bm::word_t** blk_blk = top_blocks_[i];
+- // TODO: SIMD
++ // TODO: SIMD or unroll
+ for (unsigned j = 0; j < bm::set_sub_array_size; ++j)
+ {
+ if (blk_blk[j])
+@@ -1997,7 +1994,7 @@
+
+ // ----------------------------------------------------------------
+
+- void validate_top_full(unsigned i)
++ void validate_top_full(unsigned i) BMNOEXCEPT
+ {
+ BM_ASSERT(i < top_block_size());
+ bm::word_t** blk_blk = top_blocks_[i];
+@@ -2015,7 +2012,7 @@
+ Calculate approximate memory needed to serialize big runs
+ of 0000s and 111s (as blocks)
+ */
+- size_t calc_serialization_null_full() const
++ size_t calc_serialization_null_full() const BMNOEXCEPT
+ {
+ size_t s_size = sizeof(unsigned);
+ if (!top_blocks_)
+@@ -2041,6 +2038,9 @@
+ }
+ nb_empty += (i - nb_prev) * bm::set_sub_array_size;
+ blk_blk = top_blocks_[i];
++ BM_ASSERT(blk_blk);
++ if (!blk_blk)
++ break;
+ }
+ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
+ {
+@@ -2357,7 +2357,7 @@
+ class bit_block_guard
+ {
+ public:
+- bit_block_guard(BlocksManager& bman, bm::word_t* blk=0)
++ bit_block_guard(BlocksManager& bman, bm::word_t* blk=0) BMNOEXCEPT
+ : bman_(bman),
+ block_(blk)
+ {}
+@@ -2366,18 +2366,20 @@
+ if (IS_VALID_ADDR(block_))
+ bman_.get_allocator().free_bit_block(block_, 3);
+ }
+- void attach(bm::word_t* blk)
++
++ void attach(bm::word_t* blk) BMNOEXCEPT
+ {
+ if (IS_VALID_ADDR(block_))
+ bman_.get_allocator().free_bit_block(block_);
+ block_ = blk;
+ }
++
+ bm::word_t* allocate()
+ {
+ attach(bman_.get_allocator().alloc_bit_block(3));
+ return block_;
+ }
+- bm::word_t* get() { return block_; }
++ bm::word_t* get() BMNOEXCEPT { return block_; }
+
+ private:
+ bit_block_guard(const bit_block_guard&);
+Index: c++/include/util/bitset/bmutil.h
+===================================================================
+--- a/c++/include/util/bitset/bmutil.h (revision 90103)
++++ b/c++/include/util/bitset/bmutil.h (revision 90104)
+@@ -94,22 +94,34 @@
+ bm::word_t* end() { return (b_.w32 + bm::set_block_size); }
+ };
+
+-
+ /**
+ Get minimum of 2 values
+ */
+ template<typename T>
+-T min_value(T v1, T v2)
++T min_value(T v1, T v2) BMNOEXCEPT
+ {
+ return v1 < v2 ? v1 : v2;
+ }
+
++/**
++ \brief ad-hoc conditional expressions
++ \internal
++*/
++template <bool b> struct conditional
++{
++ static bool test() { return true; }
++};
++template <> struct conditional<false>
++{
++ static bool test() { return false; }
++};
+
++
+ /**
+ Fast loop-less function to find LOG2
+ */
+ template<typename T>
+-T ilog2(T x)
++T ilog2(T x) BMNOEXCEPT
+ {
+ unsigned int l = 0;
+
+@@ -122,7 +134,7 @@
+ }
+
+ template<>
+-inline bm::gap_word_t ilog2(gap_word_t x)
++inline bm::gap_word_t ilog2(gap_word_t x) BMNOEXCEPT
+ {
+ unsigned int l = 0;
+ if (x >= 1<<8) { x = (bm::gap_word_t)(x >> 8); l |= 8; }
+@@ -140,7 +152,7 @@
+ class ptr_guard
+ {
+ public:
+- ptr_guard(T* p) : ptr_(p) {}
++ ptr_guard(T* p) BMNOEXCEPT : ptr_(p) {}
+ ~ptr_guard() { delete ptr_; }
+ private:
+ ptr_guard(const ptr_guard<T>& p);
+@@ -154,8 +166,7 @@
+ @ingroup bitfunc
+ @internal
+ */
+-inline
+-unsigned count_leading_zeros(unsigned x)
++inline unsigned count_leading_zeros(unsigned x) BMNOEXCEPT
+ {
+ unsigned n =
+ (x >= (1U << 16)) ?
+@@ -171,7 +182,7 @@
+ @internal
+ */
+ inline
+-unsigned count_trailing_zeros(unsigned v)
++unsigned count_trailing_zeros(unsigned v) BMNOEXCEPT
+ {
+ // (v & -v) isolates the last set bit
+ return unsigned(bm::tzcnt_table<true>::_lut[(-v & v) % 37]);
+@@ -181,7 +192,7 @@
+ Lookup table based integer LOG2
+ */
+ template<typename T>
+-T ilog2_LUT(T x)
++T ilog2_LUT(T x) BMNOEXCEPT
+ {
+ unsigned l = 0;
+ if (x & 0xffff0000)
+@@ -200,7 +211,7 @@
+ Lookup table based short integer LOG2
+ */
+ template<>
+-inline bm::gap_word_t ilog2_LUT<bm::gap_word_t>(bm::gap_word_t x)
++inline bm::gap_word_t ilog2_LUT<bm::gap_word_t>(bm::gap_word_t x) BMNOEXCEPT
+ {
+ bm::gap_word_t l = 0;
+ if (x & 0xff00)
+@@ -218,7 +229,7 @@
+ #ifdef __GNUG__
+
+ BMFORCEINLINE
+-unsigned bsf_asm32(unsigned int v)
++unsigned bsf_asm32(unsigned int v) BMNOEXCEPT
+ {
+ unsigned r;
+ asm volatile(" bsfl %1, %0": "=r"(r): "rm"(v) );
+@@ -226,7 +237,7 @@
+ }
+
+ BMFORCEINLINE
+-unsigned bsr_asm32(unsigned int v)
++unsigned bsr_asm32(unsigned int v) BMNOEXCEPT
+ {
+ unsigned r;
+ asm volatile(" bsrl %1, %0": "=r"(r): "rm"(v) );
+@@ -240,7 +251,7 @@
+ #if defined(_M_AMD64) || defined(_M_X64) // inline assembly not supported
+
+ BMFORCEINLINE
+-unsigned int bsr_asm32(unsigned int value)
++unsigned int bsr_asm32(unsigned int value) BMNOEXCEPT
+ {
+ unsigned long r;
+ _BitScanReverse(&r, value);
+@@ -248,7 +259,7 @@
+ }
+
+ BMFORCEINLINE
+-unsigned int bsf_asm32(unsigned int value)
++unsigned int bsf_asm32(unsigned int value) BMNOEXCEPT
+ {
+ unsigned long r;
+ _BitScanForward(&r, value);
+@@ -258,13 +269,13 @@
+ #else
+
+ BMFORCEINLINE
+-unsigned int bsr_asm32(unsigned int value)
++unsigned int bsr_asm32(unsigned int value) BMNOEXCEPT
+ {
+ __asm bsr eax, value
+ }
+
+ BMFORCEINLINE
+-unsigned int bsf_asm32(unsigned int value)
++unsigned int bsf_asm32(unsigned int value) BMNOEXCEPT
+ {
+ __asm bsf eax, value
+ }
+@@ -280,7 +291,7 @@
+ // http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.37.8562
+ //
+ template<typename T>
+-T bit_scan_fwd(T v)
++T bit_scan_fwd(T v) BMNOEXCEPT
+ {
+ return
+ DeBruijn_bit_position<true>::_multiply[(((v & -v) * 0x077CB531U)) >> 27];
+@@ -287,7 +298,7 @@
+ }
+
+ inline
+-unsigned bit_scan_reverse32(unsigned value)
++unsigned bit_scan_reverse32(unsigned value) BMNOEXCEPT
+ {
+ BM_ASSERT(value);
+ #if defined(BM_USE_GCC_BUILD)
+@@ -302,7 +313,7 @@
+ }
+
+ inline
+-unsigned bit_scan_forward32(unsigned value)
++unsigned bit_scan_forward32(unsigned value) BMNOEXCEPT
+ {
+ BM_ASSERT(value);
+ #if defined(BM_USE_GCC_BUILD)
+@@ -318,7 +329,7 @@
+
+
+ BMFORCEINLINE
+-unsigned long long bmi_bslr_u64(unsigned long long w)
++unsigned long long bmi_bslr_u64(unsigned long long w) BMNOEXCEPT
+ {
+ #if defined(BMAVX2OPT) || defined (BMAVX512OPT)
+ return _blsr_u64(w);
+@@ -339,7 +350,7 @@
+
+ /// 64-bit bit-scan reverse
+ inline
+-unsigned count_leading_zeros_u64(bm::id64_t w)
++unsigned count_leading_zeros_u64(bm::id64_t w) BMNOEXCEPT
+ {
+ BM_ASSERT(w);
+ #if defined(BMAVX2OPT) || defined (BMAVX512OPT)
+@@ -367,7 +378,7 @@
+
+ /// 64-bit bit-scan fwd
+ inline
+-unsigned count_trailing_zeros_u64(bm::id64_t w)
++unsigned count_trailing_zeros_u64(bm::id64_t w) BMNOEXCEPT
+ {
+ BM_ASSERT(w);
+
+@@ -396,6 +407,72 @@
+
+
+
++/*!
++ Returns BSR value
++ @ingroup bitfunc
++*/
++template <class T>
++unsigned bit_scan_reverse(T value) BMNOEXCEPT
++{
++ BM_ASSERT(value);
++
++ if (bm::conditional<sizeof(T)==8>::test())
++ {
++ #if defined(BM_USE_GCC_BUILD)
++ return (unsigned) (63 - __builtin_clzll(value));
++ #else
++ bm::id64_t v8 = value;
++ v8 >>= 32;
++ unsigned v = (unsigned)v8;
++ if (v)
++ {
++ v = bm::bit_scan_reverse32(v);
++ return v + 32;
++ }
++ #endif
++ }
++ return bm::bit_scan_reverse32((unsigned)value);
++}
++
++/*! \brief and functor
++ \internal
++ */
++struct and_func
++{
++ static
++ BMFORCEINLINE unsigned op(unsigned v1, unsigned v2) BMNOEXCEPT2
++ { return v1 & v2; }
++};
++/*! \brief xor functor
++ \internal
++ */
++struct xor_func
++{
++ static
++ BMFORCEINLINE unsigned op(unsigned v1, unsigned v2) BMNOEXCEPT2
++ { return v1 ^ v2; }
++};
++/*! \brief or functor
++ \internal
++ */
++struct or_func
++{
++ static
++ BMFORCEINLINE unsigned op(unsigned v1, unsigned v2) BMNOEXCEPT2
++ { return v1 | v2; }
++};
++/*! \brief sub functor
++ \internal
++ */
++struct sub_func
++{
++ static
++ BMFORCEINLINE unsigned op(unsigned v1, unsigned v2) BMNOEXCEPT2
++ { return v1 & ~v2; }
++};
++
++
++
+ #ifdef __GNUG__
+ #pragma GCC diagnostic pop
+ #endif
+Index: c++/include/util/bitset/bmsse4.h
+===================================================================
+--- a/c++/include/util/bitset/bmsse4.h (revision 90103)
++++ b/c++/include/util/bitset/bmsse4.h (revision 90104)
+@@ -577,6 +577,17 @@
+ }
+
+ /*!
++ @brief check if SSE wave is all 0xFFFF...FFF
++ @ingroup SSE4
++*/
++BMFORCEINLINE
++bool sse42_test_all_one_wave(const void* ptr)
++{
++ return _mm_test_all_ones(_mm_loadu_si128((__m128i*)ptr));
++}
++
++
++/*!
+ @brief check if wave of pointers is all NULL
+ @ingroup SSE4
+ */
+@@ -973,12 +984,14 @@
+ #endif
+
+ /*!
+- SSE4.2 check for one to two (variable len) 128 bit SSE lines for gap search results (8 elements)
++ SSE4.2 check for one to two (variable len) 128 bit SSE lines
++ for gap search results (8 elements)
+ @ingroup SSE4
+ \internal
+ */
+ inline
+-unsigned sse4_gap_find(const bm::gap_word_t* BMRESTRICT pbuf, const bm::gap_word_t pos, const unsigned size)
++unsigned sse4_gap_find(const bm::gap_word_t* BMRESTRICT pbuf,
++ const bm::gap_word_t pos, const unsigned size)
+ {
+ BM_ASSERT(size <= 16);
+ BM_ASSERT(size);
+@@ -1032,6 +1045,74 @@
+ }
+
+ /**
++ Hybrid binary search, starts as binary, then switches to linear scan
++
++ \param buf - GAP buffer pointer.
++ \param pos - index of the element.
++ \param is_set - output. GAP value (0 or 1).
++ \return GAP index.
++
++ @ingroup SSE4
++*/
++inline
++unsigned sse42_gap_bfind(const unsigned short* BMRESTRICT buf,
++ unsigned pos, unsigned* BMRESTRICT is_set)
++{
++ unsigned start = 1;
++ unsigned end = 1 + ((*buf) >> 3);
++ unsigned dsize = end - start;
++
++ if (dsize < 17)
++ {
++ start = bm::sse4_gap_find(buf+1, (bm::gap_word_t)pos, dsize);
++ *is_set = ((*buf) & 1) ^ (start & 1);
++ BM_ASSERT(buf[start+1] >= pos);
++ BM_ASSERT(buf[start] < pos || (start==0));
++
++ return start+1;
++ }
++ unsigned arr_end = end;
++ while (start != end)
++ {
++ unsigned curr = (start + end) >> 1;
++ if (buf[curr] < pos)
++ start = curr + 1;
++ else
++ end = curr;
++
++ unsigned size = end - start;
++ if (size < 16)
++ {
++ size += (end != arr_end);
++ unsigned idx =
++ bm::sse4_gap_find(buf + start, (bm::gap_word_t)pos, size);
++ start += idx;
++
++ BM_ASSERT(buf[start] >= pos);
++ BM_ASSERT(buf[start - 1] < pos || (start == 1));
++ break;
++ }
++ }
++
++ *is_set = ((*buf) & 1) ^ ((start-1) & 1);
++ return start;
++}
++
++/**
++ Hybrid binary search, starts as binary, then switches to scan
++ @ingroup SSE4
++*/
++inline
++unsigned sse42_gap_test(const unsigned short* BMRESTRICT buf, unsigned pos)
++{
++ unsigned is_set;
++ bm::sse42_gap_bfind(buf, pos, &is_set);
++ return is_set;
++}
++
++
++
++/**
+ Experimental (test) function to do SIMD vector search (lower bound)
+ in sorted, growing array
+ @ingroup SSE4
+@@ -1751,6 +1832,8 @@
+ #define VECT_BIT_BLOCK_XOR(t, src, src_xor, d) \
+ sse42_bit_block_xor(t, src, src_xor, d)
+
++#define VECT_GAP_BFIND(buf, pos, is_set) \
++ sse42_gap_bfind(buf, pos, is_set)
+
+ #ifdef __GNUG__
+ #pragma GCC diagnostic pop
+Index: c++/include/util/bitset/bmserial.h
+===================================================================
+--- a/c++/include/util/bitset/bmserial.h (revision 90103)
++++ b/c++/include/util/bitset/bmserial.h (revision 90104)
+@@ -75,12 +75,12 @@
+ class serializer
+ {
+ public:
+- typedef BV bvector_type;
+- typedef typename bvector_type::allocator_type allocator_type;
+- typedef typename bvector_type::blocks_manager_type blocks_manager_type;
+- typedef typename bvector_type::statistics statistics_type;
+- typedef typename bvector_type::block_idx_type block_idx_type;
+- typedef typename bvector_type::size_type size_type;
++ typedef BV bvector_type;
++ typedef typename bvector_type::allocator_type allocator_type;
++ typedef typename bvector_type::blocks_manager_type blocks_manager_type;
++ typedef typename bvector_type::statistics statistics_type;
++ typedef typename bvector_type::block_idx_type block_idx_type;
++ typedef typename bvector_type::size_type size_type;
+
+ typedef byte_buffer<allocator_type> buffer;
+ typedef bm::bv_ref_vector<BV> bv_ref_vector_type;
+@@ -113,7 +113,7 @@
+ @param clevel - compression level (0-5)
+ @sa get_compression_level
+ */
+- void set_compression_level(unsigned clevel);
++ void set_compression_level(unsigned clevel) BMNOEXCEPT;
+
+ /**
+ Get compression level (0-5), Default 5 (recommended)
+@@ -127,7 +127,8 @@
+ Recommended: use 3 or 5
+
+ */
+- unsigned get_compression_level() const { return compression_level_; }
++ unsigned get_compression_level() const BMNOEXCEPT
++ { return compression_level_; }
+
+
+ //@}
+@@ -189,7 +190,8 @@
+ Return serialization counter vector
+ @internal
+ */
+- const size_type* get_compression_stat() const { return compression_stat_; }
++ const size_type* get_compression_stat() const BMNOEXCEPT
++ { return compression_stat_; }
+
+ /**
+ Set GAP length serialization (serializes GAP levels of the original vector)
+@@ -196,13 +198,13 @@
+
+ @param value - when TRUE serialized vector includes GAP levels parameters
+ */
+- void gap_length_serialization(bool value);
++ void gap_length_serialization(bool value) BMNOEXCEPT;
+
+ /**
+ Set byte-order serialization (for cross platform compatibility)
+ @param value - TRUE serialization format includes byte-order marker
+ */
+- void byte_order_serialization(bool value);
++ void byte_order_serialization(bool value) BMNOEXCEPT;
+
+ /**
+ Add skip-markers to serialization BLOB for faster range decode
+@@ -214,7 +216,7 @@
+ smaller interval means more bookmarks added to the skip list thus
+ more increasing the BLOB size
+ */
+- void set_bookmarks(bool enable, unsigned bm_interval = 256);
++ void set_bookmarks(bool enable, unsigned bm_interval = 256) BMNOEXCEPT;
+
+ /**
+ Attach collection of reference vectors for XOR serialization
+@@ -227,7 +229,7 @@
+ Set current index in rer.vector collection
+ (not a row idx or plain idx)
+ */
+- void set_curr_ref_idx(size_type ref_idx);
++ void set_curr_ref_idx(size_type ref_idx) BMNOEXCEPT;
+
+
+ protected:
+@@ -234,13 +236,14 @@
+ /**
+ Encode serialization header information
+ */
+- void encode_header(const BV& bv, bm::encoder& enc);
++ void encode_header(const BV& bv, bm::encoder& enc) BMNOEXCEPT;
+
+ /*! Encode GAP block */
+ void encode_gap_block(const bm::gap_word_t* gap_block, bm::encoder& enc);
+
+ /*! Encode GAP block with Elias Gamma coder */
+- void gamma_gap_block(const bm::gap_word_t* gap_block, bm::encoder& enc);
++ void gamma_gap_block(const bm::gap_word_t* gap_block,
++ bm::encoder& enc) BMNOEXCEPT;
+
+ /**
+ Encode GAP block as delta-array with Elias Gamma coder
+@@ -248,29 +251,30 @@
+ void gamma_gap_array(const bm::gap_word_t* gap_block,
+ unsigned arr_len,
+ bm::encoder& enc,
+- bool inverted = false);
++ bool inverted = false) BMNOEXCEPT;
+
+ /// Encode bit-block as an array of bits
+ void encode_bit_array(const bm::word_t* block,
+- bm::encoder& enc, bool inverted);
++ bm::encoder& enc, bool inverted) BMNOEXCEPT;
+
+ void gamma_gap_bit_block(const bm::word_t* block,
+- bm::encoder& enc);
++ bm::encoder& enc) BMNOEXCEPT;
+
+ void gamma_arr_bit_block(const bm::word_t* block,
+- bm::encoder& enc, bool inverted);
++ bm::encoder& enc, bool inverted) BMNOEXCEPT;
+
+ void bienc_arr_bit_block(const bm::word_t* block,
+- bm::encoder& enc, bool inverted);
++ bm::encoder& enc, bool inverted) BMNOEXCEPT;
+
+ /// encode bit-block as interpolated bit block of gaps
+- void bienc_gap_bit_block(const bm::word_t* block, bm::encoder& enc);
++ void bienc_gap_bit_block(const bm::word_t* block,
++ bm::encoder& enc) BMNOEXCEPT;
+
+ void interpolated_arr_bit_block(const bm::word_t* block,
+- bm::encoder& enc, bool inverted);
++ bm::encoder& enc, bool inverted) BMNOEXCEPT;
+ /// encode bit-block as interpolated gap block
+ void interpolated_gap_bit_block(const bm::word_t* block,
+- bm::encoder& enc);
++ bm::encoder& enc) BMNOEXCEPT;
+
+ /**
+ Encode GAP block as an array with binary interpolated coder
+@@ -278,16 +282,16 @@
+ void interpolated_gap_array(const bm::gap_word_t* gap_block,
+ unsigned arr_len,
+ bm::encoder& enc,
+- bool inverted);
++ bool inverted) BMNOEXCEPT;
+ void interpolated_gap_array_v0(const bm::gap_word_t* gap_block,
+ unsigned arr_len,
+ bm::encoder& enc,
+- bool inverted);
++ bool inverted) BMNOEXCEPT;
+
+
+ /*! Encode GAP block with using binary interpolated encoder */
+ void interpolated_encode_gap_block(
+- const bm::gap_word_t* gap_block, bm::encoder& enc);
++ const bm::gap_word_t* gap_block, bm::encoder& enc) BMNOEXCEPT;
+
+ /**
+ Encode BIT block with repeatable runs of zeroes
+@@ -294,13 +298,13 @@
+ */
+ void encode_bit_interval(const bm::word_t* blk,
+ bm::encoder& enc,
+- unsigned size_control);
++ unsigned size_control) BMNOEXCEPT;
+ /**
+ Encode bit-block using digest (hierarchical compression)
+ */
+ void encode_bit_digest(const bm::word_t* blk,
+- bm::encoder& enc,
+- bm::id64_t d0);
++ bm::encoder& enc,
++ bm::id64_t d0) BMNOEXCEPT;
+
+ /**
+ Determine best representation for GAP block based
+@@ -314,25 +318,26 @@
+
+ @internal
+ */
+- unsigned char find_gap_best_encoding(const bm::gap_word_t* gap_block);
++ unsigned char
++ find_gap_best_encoding(const bm::gap_word_t* gap_block) BMNOEXCEPT;
+
+ /// Determine best representation for a bit-block
+- unsigned char find_bit_best_encoding(const bm::word_t* block);
++ unsigned char find_bit_best_encoding(const bm::word_t* block) BMNOEXCEPT;
+
+ /// Determine best representation for a bit-block (level 5)
+- unsigned char find_bit_best_encoding_l5(const bm::word_t* block);
++ unsigned char find_bit_best_encoding_l5(const bm::word_t* block) BMNOEXCEPT;
+
+ /// Reset all accumulated compression statistics
+- void reset_compression_stats();
++ void reset_compression_stats() BMNOEXCEPT;
+
+- void reset_models() { mod_size_ = 0; }
+- void add_model(unsigned char mod, unsigned score);
++ void reset_models() BMNOEXCEPT { mod_size_ = 0; }
++ void add_model(unsigned char mod, unsigned score) BMNOEXCEPT;
+ protected:
+
+ /// Bookmark state structure
+ struct bookmark_state
+ {
+- bookmark_state(block_idx_type nb_range)
++ bookmark_state(block_idx_type nb_range) BMNOEXCEPT
+ : ptr_(0), nb_(0),
+ nb_range_(nb_range), bm_type_(0)
+ {
+@@ -364,7 +369,7 @@
+ */
+ static
+ void process_bookmark(block_idx_type nb, bookmark_state& bookm,
+- bm::encoder& enc);
++ bm::encoder& enc) BMNOEXCEPT;
+
+ private:
+ serializer(const serializer&);
+@@ -421,6 +426,8 @@
+ protected:
+ typedef DEC decoder_type;
+ typedef BLOCK_IDX block_idx_type;
++ typedef bm::bit_in<DEC> bit_in_type;
++
+ protected:
+ deseriaizer_base()
+ : id_array_(0), bookmark_idx_(0), skip_offset_(0), skip_pos_(0)
+@@ -440,29 +447,31 @@
+ bm::gap_word_t* dst_arr);
+
+ /// Read binary interpolated list into a bit-set
+- void read_bic_arr(decoder_type& decoder, bm::word_t* blk);
++ void read_bic_arr(decoder_type& decoder, bm::word_t* blk) BMNOEXCEPT;
+
+ /// Read binary interpolated gap blocks into a bitset
+- void read_bic_gap(decoder_type& decoder, bm::word_t* blk);
++ void read_bic_gap(decoder_type& decoder, bm::word_t* blk) BMNOEXCEPT;
+
+ /// Read inverted binary interpolated list into a bit-set
+- void read_bic_arr_inv(decoder_type& decoder, bm::word_t* blk);
++ void read_bic_arr_inv(decoder_type& decoder, bm::word_t* blk) BMNOEXCEPT;
+
+ /// Read digest0-type bit-block
+- void read_digest0_block(decoder_type& decoder, bm::word_t* blk);
++ void read_digest0_block(decoder_type& decoder, bm::word_t* blk) BMNOEXCEPT;
+
+
+ /// read bit-block encoded as runs
+ static
+- void read_0runs_block(decoder_type& decoder, bm::word_t* blk);
++ void read_0runs_block(decoder_type& decoder, bm::word_t* blk) BMNOEXCEPT;
+
+ static
+- const char* err_msg() { return "BM::Invalid serialization format"; }
++ const char* err_msg() BMNOEXCEPT { return "BM::Invalid serialization format"; }
+
+ /// Try to skip if skip bookmark is available within reach
+ /// @return new block idx if skip went well
+ ///
+- block_idx_type try_skip(decoder_type& decoder, block_idx_type nb, block_idx_type expect_nb);
++ block_idx_type try_skip(decoder_type& decoder,
++ block_idx_type nb,
++ block_idx_type expect_nb) BMNOEXCEPT;
+
+ protected:
+ bm::gap_word_t* id_array_; ///< ptr to idx array for temp decode use
+@@ -519,7 +528,7 @@
+ is not guaranteed to be absent
+ @sa unset_range()
+ */
+- void set_range(size_type from, size_type to)
++ void set_range(size_type from, size_type to) BMNOEXCEPT
+ {
+ is_range_set_ = 1; idx_from_ = from; idx_to_ = to;
+ }
+@@ -528,7 +537,7 @@
+ Disable range deserialization
+ @sa set_range()
+ */
+- void unset_range() { is_range_set_ = 0; }
++ void unset_range() BMNOEXCEPT { is_range_set_ = 0; }
+
+ protected:
+ typedef typename BV::blocks_manager_type blocks_manager_type;
+@@ -608,7 +617,7 @@
+ void set_range(size_type from, size_type to);
+
+ /// disable range filtration
+- void unset_range() { is_range_set_ = false; }
++ void unset_range() BMNOEXCEPT { is_range_set_ = false; }
+
+ size_type deserialize(bvector_type& bv,
+ serial_iterator_type& sit,
+@@ -639,7 +648,8 @@
+ serial_iterator_type& sit,
+ set_operation op);
+ static
+- const char* err_msg() { return "BM::de-serialization format error"; }
++ const char* err_msg() BMNOEXCEPT
++ { return "BM::de-serialization format error"; }
+ private:
+ bool is_range_set_ = false;
+ size_type nb_range_from_ = 0;
+@@ -675,7 +685,7 @@
+ void next();
+
+ /// skip all zero or all-one blocks
+- block_idx_type skip_mono_blocks();
++ block_idx_type skip_mono_blocks() BMNOEXCEPT;
+
+ /// read bit block, using logical operation
+ unsigned get_bit_block(bm::word_t* dst_block,
+@@ -708,17 +718,17 @@
+ };
+
+ /// Returns iterator internal state
+- iterator_state state() const { return this->state_; }
++ iterator_state state() const BMNOEXCEPT { return this->state_; }
+
+- iterator_state get_state() const { return this->state_; }
++ iterator_state get_state() const BMNOEXCEPT { return this->state_; }
+ /// Number of ids in the inverted list (valid for e_list_ids)
+- unsigned get_id_count() const { return this->id_cnt_; }
++ unsigned get_id_count() const BMNOEXCEPT { return this->id_cnt_; }
+
+ /// Get last id from the id list
+- bm::id_t get_id() const { return this->last_id_; }
++ bm::id_t get_id() const BMNOEXCEPT { return this->last_id_; }
+
+ /// Get current block index
+- block_idx_type block_idx() const { return this->block_idx_; }
++ block_idx_type block_idx() const BMNOEXCEPT { return this->block_idx_; }
+
+ public:
+ /// member function pointer for bitset-bitset get operations
+@@ -761,19 +771,19 @@
+ /// (Converts inverted list into bits)
+ /// Returns number of words (bits) being read
+ unsigned get_arr_bit(bm::word_t* dst_block,
+- bool clear_target=true);
++ bool clear_target=true) BMNOEXCEPT;
+
+ /// Get current block type
+- unsigned get_block_type() const { return block_type_; }
++ unsigned get_block_type() const BMNOEXCEPT { return block_type_; }
+
+- unsigned get_bit();
++ unsigned get_bit() BMNOEXCEPT;
+
+- void get_inv_arr(bm::word_t* block);
++ void get_inv_arr(bm::word_t* block) BMNOEXCEPT;
+
+ /// Try to skip if skip bookmark is available within reach
+ /// @return true if skip went well
+ ///
+- bool try_skip(block_idx_type nb, block_idx_type expect_nb)
++ bool try_skip(block_idx_type nb, block_idx_type expect_nb) BMNOEXCEPT
+ {
+ block_idx_type new_nb = parent_type::try_skip(decoder_, nb, expect_nb);
+ if (new_nb)
+@@ -1064,6 +1074,7 @@
+ gap_serial_(false),
+ byte_order_serial_(true),
+ sb_bookmarks_(false),
++ sb_range_(0),
+ compression_level_(bm::set_compression_default),
+ ref_vect_(0),
+ ref_idx_(0),
+@@ -1097,7 +1108,7 @@
+
+
+ template<class BV>
+-void serializer<BV>::reset_compression_stats()
++void serializer<BV>::reset_compression_stats() BMNOEXCEPT
+ {
+ for (unsigned i = 0; i < 256; ++i)
+ compression_stat_[i] = 0;
+@@ -1105,7 +1116,7 @@
+
+
+ template<class BV>
+-void serializer<BV>::set_compression_level(unsigned clevel)
++void serializer<BV>::set_compression_level(unsigned clevel) BMNOEXCEPT
+ {
+ if (clevel <= bm::set_compression_max)
+ compression_level_ = clevel;
+@@ -1112,23 +1123,23 @@
+ }
+
+ template<class BV>
+-void serializer<BV>::gap_length_serialization(bool value)
++void serializer<BV>::gap_length_serialization(bool value) BMNOEXCEPT
+ {
+ gap_serial_ = value;
+ }
+
+ template<class BV>
+-void serializer<BV>::byte_order_serialization(bool value)
++void serializer<BV>::byte_order_serialization(bool value) BMNOEXCEPT
+ {
+ byte_order_serial_ = value;
+ }
+
+ template<class BV>
+-void serializer<BV>::set_bookmarks(bool enable, unsigned bm_interval)
++void serializer<BV>::set_bookmarks(bool enable, unsigned bm_interval) BMNOEXCEPT
+ {
+ sb_bookmarks_ = enable;
+ if (enable)
+- {
++ {
+ if (bm_interval > 512)
+ bm_interval = 512;
+ else
+@@ -1148,13 +1159,13 @@
+ }
+
+ template<class BV>
+-void serializer<BV>::set_curr_ref_idx(size_type ref_idx)
++void serializer<BV>::set_curr_ref_idx(size_type ref_idx) BMNOEXCEPT
+ {
+ ref_idx_ = ref_idx;
+ }
+
+ template<class BV>
+-void serializer<BV>::encode_header(const BV& bv, bm::encoder& enc)
++void serializer<BV>::encode_header(const BV& bv, bm::encoder& enc) BMNOEXCEPT
+ {
+ const blocks_manager_type& bman = bv.get_blocks_manager();
+
+@@ -1207,7 +1218,7 @@
+
+ template<class BV>
+ void serializer<BV>::interpolated_encode_gap_block(
+- const bm::gap_word_t* gap_block, bm::encoder& enc)
++ const bm::gap_word_t* gap_block, bm::encoder& enc) BMNOEXCEPT
+ {
+ unsigned len = bm::gap_length(gap_block);
+ if (len > 4) // BIC encoding
+@@ -1266,7 +1277,8 @@
+
+
+ template<class BV>
+-void serializer<BV>::gamma_gap_block(const bm::gap_word_t* gap_block, bm::encoder& enc)
++void serializer<BV>::gamma_gap_block(const bm::gap_word_t* gap_block,
++ bm::encoder& enc) BMNOEXCEPT
+ {
+ unsigned len = gap_length(gap_block);
+ if (len > 3 && (compression_level_ > 3)) // Use Elias Gamma encoding
+@@ -1307,7 +1319,7 @@
+ void serializer<BV>::gamma_gap_array(const bm::gap_word_t* gap_array,
+ unsigned arr_len,
+ bm::encoder& enc,
+- bool inverted)
++ bool inverted) BMNOEXCEPT
+ {
+ unsigned char scode = inverted ? bm::set_block_arrgap_egamma_inv
+ : bm::set_block_arrgap_egamma;
+@@ -1349,10 +1361,11 @@
+
+
+ template<class BV>
+-void serializer<BV>::interpolated_gap_array_v0(const bm::gap_word_t* gap_block,
+- unsigned arr_len,
+- bm::encoder& enc,
+- bool inverted)
++void serializer<BV>::interpolated_gap_array_v0(
++ const bm::gap_word_t* gap_block,
++ unsigned arr_len,
++ bm::encoder& enc,
++ bool inverted) BMNOEXCEPT
+ {
+ BM_ASSERT(arr_len <= 65535);
+ unsigned char scode = inverted ? bm::set_block_arrgap_bienc_inv
+@@ -1399,7 +1412,7 @@
+ void serializer<BV>::interpolated_gap_array(const bm::gap_word_t* gap_block,
+ unsigned arr_len,
+ bm::encoder& enc,
+- bool inverted)
++ bool inverted) BMNOEXCEPT
+ {
+ BM_ASSERT(arr_len <= 65535);
+
+@@ -1471,7 +1484,7 @@
+
+
+ template<class BV>
+-void serializer<BV>::add_model(unsigned char mod, unsigned score)
++void serializer<BV>::add_model(unsigned char mod, unsigned score) BMNOEXCEPT
+ {
+ BM_ASSERT(mod_size_ < 64); // too many models (memory corruption?)
+ scores_[mod_size_] = score; models_[mod_size_] = mod;
+@@ -1479,7 +1492,8 @@
+ }
+
+ template<class BV>
+-unsigned char serializer<BV>::find_bit_best_encoding_l5(const bm::word_t* block)
++unsigned char
++serializer<BV>::find_bit_best_encoding_l5(const bm::word_t* block) BMNOEXCEPT
+ {
+ unsigned bc, bit_gaps;
+
+@@ -1566,7 +1580,8 @@
+ }
+
+ template<class BV>
+-unsigned char serializer<BV>::find_bit_best_encoding(const bm::word_t* block)
++unsigned char
++serializer<BV>::find_bit_best_encoding(const bm::word_t* block) BMNOEXCEPT
+ {
+ reset_models();
+
+@@ -1672,7 +1687,7 @@
+
+ template<class BV>
+ unsigned char
+-serializer<BV>::find_gap_best_encoding(const bm::gap_word_t* gap_block)
++serializer<BV>::find_gap_best_encoding(const bm::gap_word_t* gap_block)BMNOEXCEPT
+ {
+ // heuristics and hard-coded rules to determine
+ // the best representation for d-GAP block
+@@ -1731,9 +1746,9 @@
+ break;
+
+ case bm::set_block_bit_1bit:
+- arr_len = gap_convert_to_arr(gap_temp_block,
+- gap_block,
+- bm::gap_equiv_len-10);
++ arr_len = bm::gap_convert_to_arr(gap_temp_block,
++ gap_block,
++ bm::gap_equiv_len-10);
+ BM_ASSERT(arr_len == 1);
+ enc.put_8(bm::set_block_bit_1bit);
+ enc.put_16(gap_temp_block[0]);
+@@ -1779,7 +1794,7 @@
+ void serializer<BV>::encode_bit_interval(const bm::word_t* blk,
+ bm::encoder& enc,
+ unsigned //size_control
+- )
++ ) BMNOEXCEPT
+ {
+ enc.put_8(bm::set_block_bit_0runs);
+ enc.put_8((blk[0]==0) ? 0 : 1); // encode start
+@@ -1830,7 +1845,7 @@
+ template<class BV>
+ void serializer<BV>::encode_bit_digest(const bm::word_t* block,
+ bm::encoder& enc,
+- bm::id64_t d0)
++ bm::id64_t d0) BMNOEXCEPT
+ {
+ // evaluate a few "sure" models here and pick the best
+ //
+@@ -1927,16 +1942,16 @@
+ template<class BV>
+ void serializer<BV>::encode_bit_array(const bm::word_t* block,
+ bm::encoder& enc,
+- bool inverted)
++ bool inverted) BMNOEXCEPT
+ {
+ unsigned arr_len;
+ unsigned mask = inverted ? ~0u : 0u;
+ // TODO: get rid of max bits
+- arr_len = bit_convert_to_arr(bit_idx_arr_.data(),
+- block,
+- bm::gap_max_bits,
+- bm::gap_max_bits_cmrz,
+- mask);
++ arr_len = bm::bit_convert_to_arr(bit_idx_arr_.data(),
++ block,
++ bm::gap_max_bits,
++ bm::gap_max_bits_cmrz,
++ mask);
+ if (arr_len)
+ {
+ unsigned char scode =
+@@ -1950,7 +1965,7 @@
+
+ template<class BV>
+ void serializer<BV>::gamma_gap_bit_block(const bm::word_t* block,
+- bm::encoder& enc)
++ bm::encoder& enc) BMNOEXCEPT
+ {
+ unsigned len = bm::bit_to_gap(bit_idx_arr_.data(), block, bm::gap_equiv_len);
+ BM_ASSERT(len); (void)len;
+@@ -1959,7 +1974,8 @@
+
+ template<class BV>
+ void serializer<BV>::gamma_arr_bit_block(const bm::word_t* block,
+- bm::encoder& enc, bool inverted)
++ bm::encoder& enc,
++ bool inverted) BMNOEXCEPT
+ {
+ unsigned mask = inverted ? ~0u : 0u;
+ unsigned arr_len = bit_convert_to_arr(bit_idx_arr_.data(),
+@@ -1978,7 +1994,8 @@
+
+ template<class BV>
+ void serializer<BV>::bienc_arr_bit_block(const bm::word_t* block,
+- bm::encoder& enc, bool inverted)
++ bm::encoder& enc,
++ bool inverted) BMNOEXCEPT
+ {
+ unsigned mask = inverted ? ~0u : 0u;
+ unsigned arr_len = bit_convert_to_arr(bit_idx_arr_.data(),
+@@ -1996,7 +2013,7 @@
+
+ template<class BV>
+ void serializer<BV>::interpolated_gap_bit_block(const bm::word_t* block,
+- bm::encoder& enc)
++ bm::encoder& enc) BMNOEXCEPT
+ {
+ unsigned len = bm::bit_to_gap(bit_idx_arr_.data(), block, bm::gap_max_bits);
+ BM_ASSERT(len); (void)len;
+@@ -2006,7 +2023,7 @@
+
+ template<class BV>
+ void serializer<BV>::bienc_gap_bit_block(const bm::word_t* block,
+- bm::encoder& enc)
++ bm::encoder& enc) BMNOEXCEPT
+ {
+ unsigned len = bm::bit_to_gap(bit_idx_arr_.data(), block, bm::gap_max_bits);
+ BM_ASSERT(len); (void)len;
+@@ -2052,8 +2069,10 @@
+
+
+ template<class BV>
+-void serializer<BV>::interpolated_arr_bit_block(const bm::word_t* block,
+- bm::encoder& enc, bool inverted)
++void
++serializer<BV>::interpolated_arr_bit_block(const bm::word_t* block,
++ bm::encoder& enc,
++ bool inverted) BMNOEXCEPT
+ {
+ unsigned mask = inverted ? ~0u : 0u;
+ unsigned arr_len = bit_convert_to_arr(bit_idx_arr_.data(),
+@@ -2134,7 +2153,7 @@
+ template<class BV>
+ void serializer<BV>::process_bookmark(block_idx_type nb,
+ bookmark_state& bookm,
+- bm::encoder& enc)
++ bm::encoder& enc) BMNOEXCEPT
+ {
+ BM_ASSERT(bookm.nb_range_);
+
+@@ -2786,8 +2805,6 @@
+ unsigned block_type,
+ bm::gap_word_t* dst_arr)
+ {
+- typedef bit_in<DEC> bit_in_type;
+-
+ bm::gap_word_t len = 0;
+
+ switch (block_type)
+@@ -2867,12 +2884,12 @@
+ }
+
+ template<typename DEC, typename BLOCK_IDX>
+-void deseriaizer_base<DEC, BLOCK_IDX>::read_bic_arr(decoder_type& dec,
+- bm::word_t* blk)
++void
++deseriaizer_base<DEC, BLOCK_IDX>::read_bic_arr(decoder_type& dec,
++ bm::word_t* blk) BMNOEXCEPT
+ {
+ BM_ASSERT(!BM_IS_GAP(blk));
+
+- typedef bit_in<DEC> bit_in_type;
+ bm::gap_word_t min_v = dec.get_16();
+ bm::gap_word_t max_v = dec.get_16();
+ unsigned arr_len = dec.get_16();
+@@ -2890,7 +2907,9 @@
+ }
+
+ template<typename DEC, typename BLOCK_IDX>
+-void deseriaizer_base<DEC, BLOCK_IDX>::read_bic_arr_inv(decoder_type& decoder, bm::word_t* blk)
++void
++deseriaizer_base<DEC, BLOCK_IDX>::read_bic_arr_inv(decoder_type& decoder,
++ bm::word_t* blk) BMNOEXCEPT
+ {
+ // TODO: optimization
+ bm::bit_block_set(blk, 0);
+@@ -2899,18 +2918,16 @@
+ }
+
+ template<typename DEC, typename BLOCK_IDX>
+-void deseriaizer_base<DEC, BLOCK_IDX>::read_bic_gap(decoder_type& dec, bm::word_t* blk)
++void deseriaizer_base<DEC, BLOCK_IDX>::read_bic_gap(decoder_type& dec,
++ bm::word_t* blk) BMNOEXCEPT
+ {
+ BM_ASSERT(!BM_IS_GAP(blk));
+
+- typedef bit_in<DEC> bit_in_type;
+-
+ bm::gap_word_t head = dec.get_8();
+ unsigned arr_len = dec.get_16();
+ bm::gap_word_t min_v = dec.get_16();
+
+ BM_ASSERT(arr_len <= bie_cut_off);
+-
+
+ id_array_[0] = head;
+ id_array_[1] = min_v;
+@@ -2920,15 +2937,14 @@
+ bin.bic_decode_u16(&id_array_[2], arr_len-2, min_v, 65535);
+
+ if (!IS_VALID_ADDR(blk))
+- {
+ return;
+- }
+ bm::gap_add_to_bitset(blk, id_array_, arr_len);
+ }
+
+ template<typename DEC, typename BLOCK_IDX>
+-void deseriaizer_base<DEC, BLOCK_IDX>::read_digest0_block(decoder_type& dec,
+- bm::word_t* block)
++void deseriaizer_base<DEC, BLOCK_IDX>::read_digest0_block(
++ decoder_type& dec,
++ bm::word_t* block) BMNOEXCEPT
+ {
+ bm::id64_t d0 = dec.get_64();
+ while (d0)
+@@ -2966,8 +2982,9 @@
+ }
+
+ template<typename DEC, typename BLOCK_IDX>
+-void deseriaizer_base<DEC, BLOCK_IDX>::read_0runs_block(decoder_type& dec,
+- bm::word_t* blk)
++void deseriaizer_base<DEC, BLOCK_IDX>::read_0runs_block(
++ decoder_type& dec,
++ bm::word_t* blk) BMNOEXCEPT
+ {
+ //TODO: optimization if block exists and it is OR-ed read
+ bm::bit_block_set(blk, 0);
+@@ -2995,13 +3012,13 @@
+
+
+ template<typename DEC, typename BLOCK_IDX>
+-void deseriaizer_base<DEC, BLOCK_IDX>::read_gap_block(decoder_type& decoder,
++void
++deseriaizer_base<DEC, BLOCK_IDX>::read_gap_block(decoder_type& decoder,
+ unsigned block_type,
+ bm::gap_word_t* dst_block,
+ bm::gap_word_t& gap_head)
+ {
+- typedef bit_in<DEC> bit_in_type;
+-
++// typedef bit_in<DEC> bit_in_type;
+ switch (block_type)
+ {
+ case set_block_gap:
+@@ -3028,7 +3045,7 @@
+ for (gap_word_t k = 0; k < len; ++k)
+ {
+ gap_word_t bit_idx = decoder.get_16();
+- gap_add_value(dst_block, bit_idx);
++ bm::gap_add_value(dst_block, bit_idx);
+ } // for
+ }
+ break;
+@@ -3126,7 +3143,7 @@
+ deseriaizer_base<DEC, BLOCK_IDX>::try_skip(
+ decoder_type& decoder,
+ block_idx_type nb,
+- block_idx_type expect_nb)
++ block_idx_type expect_nb) BMNOEXCEPT
+ {
+ if (skip_offset_) // skip bookmark is available
+ {
+@@ -3156,26 +3173,22 @@
+ nb_sync = decoder.get_32();
+ break;
+ case set_nb_sync_mark48:
++ nb_sync = block_idx_type(decoder.get_48());
+ #ifndef BM64ADDR
+ BM_ASSERT(0);
+- #ifndef BM_NO_STL
+- throw std::logic_error(this->err_msg());
+- #else
+- BM_THROW(BM_ERR_SERIALFORMAT);
+- #endif
++ decoder.set_pos(save_pos);
++ skip_offset_ = 0;
++ return 0; // invalid bookmark from 64-bit serialization
+ #endif
+- nb_sync = block_idx_type(decoder.get_48());
+ break;
+ case set_nb_sync_mark64:
++ nb_sync = block_idx_type(decoder.get_64());
+ #ifndef BM64ADDR
+ BM_ASSERT(0);
+- #ifndef BM_NO_STL
+- throw std::logic_error(this->err_msg());
+- #else
+- BM_THROW(BM_ERR_SERIALFORMAT);
+- #endif
++ decoder.set_pos(save_pos);
++ skip_offset_ = 0;
++ return 0; // invalid bookmark from 64-bit serialization
+ #endif
+- nb_sync = block_idx_type(decoder.get_64());
+ break;
+ default:
+ BM_ASSERT(0);
+@@ -3187,8 +3200,6 @@
+ nb_sync += nb;
+ if (nb_sync <= expect_nb) // within reach
+ {
+- //block_idx_ = nb_sync;
+- //state_ = e_blocks;
+ skip_offset_ = 0;
+ return nb_sync;
+ }
+@@ -3593,6 +3604,7 @@
+ {
+ // 64-bit vector cannot be deserialized into 32-bit
+ BM_ASSERT(sizeof(block_idx_type)==8);
++ bv_size = (block_idx_type)dec.get_64();
+ #ifndef BM64ADDR
+ #ifndef BM_NO_STL
+ throw std::logic_error(this->err_msg());
+@@ -3600,7 +3612,6 @@
+ BM_THROW(BM_ERR_SERIALFORMAT);
+ #endif
+ #endif
+- bv_size = (block_idx_type)dec.get_64();
+ }
+ else
+ bv_size = dec.get_32();
+@@ -3715,12 +3726,12 @@
+ goto process_full_blocks;
+ #else
+ BM_ASSERT(0); // 32-bit vector cannot read 64-bit
++ dec.get_64();
+ #ifndef BM_NO_STL
+ throw std::logic_error(this->err_msg());
+ #else
+ BM_THROW(BM_ERR_SERIALFORMAT);
+ #endif
+- dec.get_64();
+ #endif
+ process_full_blocks:
+ {
+@@ -3957,7 +3968,7 @@
+ template<class BV, class DEC>
+ void deserializer<BV, DEC>::xor_decode(size_type x_ref_idx, bm::id64_t x_ref_d64,
+ blocks_manager_type& bman,
+- block_idx_type nb)
++ block_idx_type nb)
+ {
+ BM_ASSERT(ref_vect_);
+
+@@ -4098,7 +4109,15 @@
+ }
+ state_ = e_blocks;
+ }
+- block_idx_arr_ = (gap_word_t*) ::malloc(sizeof(gap_word_t) * bm::gap_max_bits);
++ block_idx_arr_=(gap_word_t*)::malloc(sizeof(gap_word_t) * bm::gap_max_bits);
++ if (!block_idx_arr_)
++ {
++ #ifndef BM_NO_STL
++ throw std::bad_alloc();
++ #else
++ BM_THROW(BM_ERR_BADALLOC);
++ #endif
++ }
+ this->id_array_ = block_idx_arr_;
+ }
+
+@@ -4335,7 +4354,7 @@
+
+ template<typename DEC, typename BLOCK_IDX>
+ typename serial_stream_iterator<DEC, BLOCK_IDX>::block_idx_type
+-serial_stream_iterator<DEC, BLOCK_IDX>::skip_mono_blocks()
++serial_stream_iterator<DEC, BLOCK_IDX>::skip_mono_blocks() BMNOEXCEPT
+ {
+ BM_ASSERT(state_ == e_zero_blocks || state_ == e_one_blocks);
+ if (!mono_block_cnt_)
+@@ -4350,7 +4369,8 @@
+ }
+
+ template<typename DEC, typename BLOCK_IDX>
+-void serial_stream_iterator<DEC, BLOCK_IDX>::get_inv_arr(bm::word_t* block)
++void
++serial_stream_iterator<DEC, BLOCK_IDX>::get_inv_arr(bm::word_t* block) BMNOEXCEPT
+ {
+ gap_word_t len = decoder_.get_16();
+ if (block)
+@@ -4358,7 +4378,7 @@
+ bm::bit_block_set(block, ~0u);
+ for (unsigned k = 0; k < len; ++k)
+ {
+- gap_word_t bit_idx = decoder_.get_16();
++ bm::gap_word_t bit_idx = decoder_.get_16();
+ bm::clear_bit(block, bit_idx);
+ }
+ }
+@@ -5519,8 +5539,8 @@
+
+ template<typename DEC, typename BLOCK_IDX>
+ unsigned serial_stream_iterator<DEC, BLOCK_IDX>::get_arr_bit(
+- bm::word_t* dst_block,
+- bool clear_target)
++ bm::word_t* dst_block,
++ bool clear_target) BMNOEXCEPT
+ {
+ BM_ASSERT(this->block_type_ == set_block_arrbit ||
+ this->block_type_ == set_block_bit_1bit);
+@@ -5547,10 +5567,9 @@
+ else
+ {
+ if (this->block_type_ == set_block_bit_1bit)
+- {
+- return 1; // nothing to do: len var already consumed 16bits
+- }
+- // fwd the decocing stream
++ return 1; // nothing to do: len var already consumed 16 bits
++
++ // fwd the decode stream
+ decoder_.seek(len * 2);
+ }
+ return len;
+@@ -5557,7 +5576,7 @@
+ }
+
+ template<typename DEC, typename BLOCK_IDX>
+-unsigned serial_stream_iterator<DEC, BLOCK_IDX>::get_bit()
++unsigned serial_stream_iterator<DEC, BLOCK_IDX>::get_bit() BMNOEXCEPT
+ {
+ BM_ASSERT(this->block_type_ == set_block_bit_1bit);
+ ++(this->block_idx_);
+Index: c++/include/util/bitset/bm.h
+===================================================================
+--- a/c++/include/util/bitset/bm.h (revision 90103)
++++ b/c++/include/util/bitset/bm.h (revision 90104)
+@@ -145,12 +145,12 @@
+ class reference
+ {
+ public:
+- reference(bvector<Alloc>& bv, size_type position)
++ reference(bvector<Alloc>& bv, size_type position) BMNOEXCEPT
+ : bv_(bv),
+ position_(position)
+ {}
+
+- reference(const reference& ref)
++ reference(const reference& ref) BMNOEXCEPT
+ : bv_(ref.bv_),
+ position_(ref.position_)
+ {
+@@ -157,7 +157,7 @@
+ bv_.set(position_, ref.bv_.get_bit(position_));
+ }
+
+- operator bool() const
++ operator bool() const BMNOEXCEPT
+ {
+ return bv_.get_bit(position_);
+ }
+@@ -168,13 +168,13 @@
+ return *this;
+ }
+
+- const reference& operator=(bool value) const
++ const reference& operator=(bool value) const BMNOEXCEPT
+ {
+ bv_.set(position_, value);
+ return *this;
+ }
+
+- bool operator==(const reference& ref) const
++ bool operator==(const reference& ref) const BMNOEXCEPT
+ {
+ return bool(*this) == bool(ref);
+ }
+@@ -204,13 +204,13 @@
+ }
+
+ /*! Logical Not operator */
+- bool operator!() const
++ bool operator!() const BMNOEXCEPT
+ {
+ return !bv_.get_bit(position_);
+ }
+
+ /*! Bit Not operator */
+- bool operator~() const
++ bool operator~() const BMNOEXCEPT
+ {
+ return !bv_.get_bit(position_);
+ }
+@@ -237,34 +237,37 @@
+ {
+ friend class bvector;
+ public:
+- iterator_base() : bv_(0), position_(bm::id_max), block_(0) {}
++ iterator_base() BMNOEXCEPT
++ : bv_(0), position_(bm::id_max), block_(0), block_type_(0),
++ block_idx_(0)
++ {}
+
+- bool operator==(const iterator_base& it) const
++ bool operator==(const iterator_base& it) const BMNOEXCEPT
+ {
+ return (position_ == it.position_) && (bv_ == it.bv_);
+ }
+
+- bool operator!=(const iterator_base& it) const
++ bool operator!=(const iterator_base& it) const BMNOEXCEPT
+ {
+ return ! operator==(it);
+ }
+
+- bool operator < (const iterator_base& it) const
++ bool operator < (const iterator_base& it) const BMNOEXCEPT
+ {
+ return position_ < it.position_;
+ }
+
+- bool operator <= (const iterator_base& it) const
++ bool operator <= (const iterator_base& it) const BMNOEXCEPT
+ {
+ return position_ <= it.position_;
+ }
+
+- bool operator > (const iterator_base& it) const
++ bool operator > (const iterator_base& it) const BMNOEXCEPT
+ {
+ return position_ > it.position_;
+ }
+
+- bool operator >= (const iterator_base& it) const
++ bool operator >= (const iterator_base& it) const BMNOEXCEPT
+ {
+ return position_ >= it.position_;
+ }
+@@ -274,18 +277,19 @@
+ \brief Checks if iterator is still valid. Analog of != 0 comparison for pointers.
+ \returns true if iterator is valid.
+ */
+- bool valid() const { return position_ != bm::id_max; }
++ bool valid() const BMNOEXCEPT { return position_ != bm::id_max; }
+
+ /**
+ \fn bool bm::bvector::iterator_base::invalidate()
+ \brief Turns iterator into an invalid state.
+ */
+- void invalidate() { position_ = bm::id_max; }
++ void invalidate() BMNOEXCEPT
++ { position_ = bm::id_max; block_type_ = ~0u;}
+
+ /** \brief Compare FSMs for testing purposes
+ \internal
+ */
+- bool compare_state(const iterator_base& ib) const
++ bool compare_state(const iterator_base& ib) const BMNOEXCEPT
+ {
+ if (this->bv_ != ib.bv_) return false;
+ if (this->position_ != ib.position_) return false;
+@@ -317,7 +321,9 @@
+
+ public:
+
+- /** Information about current bitblock. */
++ /** Bit-block descriptor
++ @internal
++ */
+ struct bitblock_descr
+ {
+ const bm::word_t* ptr; //!< Word pointer.
+@@ -327,7 +333,9 @@
+ size_type pos; //!< Last bit position decode before
+ };
+
+- /** Information about current DGAP block. */
++ /** Information about current DGAP block.
++ @internal
++ */
+ struct dgap_descr
+ {
+ const gap_word_t* ptr; //!< Word pointer.
+@@ -379,9 +387,9 @@
+ typedef void pointer;
+ typedef void reference;
+
+- insert_iterator() : bvect_(0), max_bit_(0) {}
++ insert_iterator() BMNOEXCEPT : bvect_(0), max_bit_(0) {}
+
+- insert_iterator(bvector<Alloc>& bvect)
++ insert_iterator(bvector<Alloc>& bvect) BMNOEXCEPT
+ : bvect_(&bvect),
+ max_bit_(bvect.size())
+ {
+@@ -463,7 +471,7 @@
+ typedef void pointer;
+ typedef void reference;
+
+- bulk_insert_iterator()
++ bulk_insert_iterator() BMNOEXCEPT
+ : bvect_(0), buf_(0), buf_size_(0), sorted_(BM_UNKNOWN) {}
+
+ ~bulk_insert_iterator()
+@@ -473,7 +481,8 @@
+ bvect_->blockman_.get_allocator().free_bit_block((bm::word_t*)buf_);
+ }
+
+- bulk_insert_iterator(bvector<Alloc>& bvect, bm::sort_order so = BM_UNKNOWN)
++ bulk_insert_iterator(bvector<Alloc>& bvect,
++ bm::sort_order so = BM_UNKNOWN) BMNOEXCEPT
+ : bvect_(&bvect), sorted_(so)
+ {
+ bvect_->init();
+@@ -499,7 +508,7 @@
+ sorted_ = BM_UNKNOWN;
+ }
+
+- bulk_insert_iterator(bulk_insert_iterator&& iit) BMNOEXEPT
++ bulk_insert_iterator(bulk_insert_iterator&& iit) BMNOEXCEPT
+ : bvect_(iit.bvect_)
+ {
+ buf_ = iit.buf_; iit.buf_ = 0;
+@@ -518,7 +527,7 @@
+ return *this;
+ }
+
+- bulk_insert_iterator& operator=(bulk_insert_iterator&& ii) BMNOEXEPT
++ bulk_insert_iterator& operator=(bulk_insert_iterator&& ii) BMNOEXCEPT
+ {
+ bvect_ = ii.bvect_;
+ if (buf_)
+@@ -562,11 +571,11 @@
+ bvect_->sync_size();
+ }
+
+- bvector_type* get_bvector() const { return bvect_; }
++ bvector_type* get_bvector() const BMNOEXCEPT { return bvect_; }
+
+ protected:
+ static
+- size_type buf_size_max()
++ size_type buf_size_max() BMNOEXCEPT
+ {
+ #ifdef BM64ADDR
+ return bm::set_block_size / 2;
+@@ -599,7 +608,7 @@
+ typedef unsigned& reference;
+
+ public:
+- enumerator() : iterator_base()
++ enumerator() BMNOEXCEPT : iterator_base()
+ {}
+
+ /*! @brief Construct enumerator associated with a vector.
+@@ -606,7 +615,7 @@
+ This construction creates unpositioned iterator with status
+ valid() == false. It can be re-positioned using go_first() or go_to()
+ */
+- enumerator(const bvector<Alloc>* bv)
++ enumerator(const bvector<Alloc>* bv) BMNOEXCEPT
+ : iterator_base()
+ {
+ this->bv_ = const_cast<bvector<Alloc>*>(bv);
+@@ -613,12 +622,26 @@
+ }
+
+ /*! @brief Construct enumerator for bit vector
++ @param bv bit-vector reference
++ @param pos bit position in the vector
++ if position is 0, it finds the next 1 or becomes not valid
++ (en.valid() == false)
++ */
++ enumerator(const bvector<Alloc>& bv, size_type pos = 0) BMNOEXCEPT
++ : iterator_base()
++ {
++ this->bv_ = const_cast<bvector<Alloc>*>(&bv);
++ go_to(pos);
++ }
++
++
++ /*! @brief Construct enumerator for bit vector
+ @param bv bit-vector pointer
+ @param pos bit position in the vector
+ if position is 0, it finds the next 1 or becomes not valid
+ (en.valid() == false)
+ */
+- enumerator(const bvector<Alloc>* bv, size_type pos)
++ enumerator(const bvector<Alloc>* bv, size_type pos) BMNOEXCEPT
+ : iterator_base()
+ {
+ this->bv_ = const_cast<bvector<Alloc>*>(bv);
+@@ -626,18 +649,18 @@
+ }
+
+ /*! \brief Get current position (value) */
+- size_type operator*() const { return this->position_; }
++ size_type operator*() const BMNOEXCEPT { return this->position_; }
+
+ /*! \brief Get current position (value) */
+- size_type value() const { return this->position_; }
++ size_type value() const BMNOEXCEPT { return this->position_; }
+
+ /*! \brief Advance enumerator forward to the next available bit */
+- enumerator& operator++() { return this->go_up(); }
++ enumerator& operator++() BMNOEXCEPT { this->go_up(); return *this; }
+
+ /*! \brief Advance enumerator forward to the next available bit.
+ Possibly do NOT use this operator it is slower than the pre-fix increment.
+ */
+- enumerator operator++(int)
++ enumerator operator++(int) BMNOEXCEPT
+ {
+ enumerator tmp = *this;
+ this->go_up();
+@@ -644,158 +667,28 @@
+ return tmp;
+ }
+
+-
+ /*! \brief Position enumerator to the first available bit */
+- void go_first()
+- {
+- BM_ASSERT(this->bv_);
+-
+- blocks_manager_type* bman = &(this->bv_->blockman_);
+- if (!bman->is_init())
+- {
+- this->invalidate();
+- return;
+- }
+-
+- bm::word_t*** blk_root = bman->top_blocks_root();
++ void go_first() BMNOEXCEPT;
+
+- this->block_idx_ = this->position_= 0;
+- unsigned i, j;
++ /*! advance iterator forward by one
++ @return true if advance was successfull and the enumerator is valid
++ */
++ bool advance() BMNOEXCEPT { return this->go_up(); }
+
+- for (i = 0; i < bman->top_block_size(); ++i)
+- {
+- bm::word_t** blk_blk = blk_root[i];
+-
+- if (blk_blk == 0) // not allocated
+- {
+- this->block_idx_ += bm::set_sub_array_size;
+- this->position_ += bm::bits_in_array;
+- continue;
+- }
+-
+- if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
+- blk_blk = FULL_SUB_BLOCK_REAL_ADDR;
+-
+- for (j = 0; j < bm::set_sub_array_size; ++j,++(this->block_idx_))
+- {
+- this->block_ = blk_blk[j];
+-
+- if (this->block_ == 0)
+- {
+- this->position_ += bits_in_block;
+- continue;
+- }
+-
+- if (BM_IS_GAP(this->block_))
+- {
+- this->block_type_ = 1;
+- if (search_in_gapblock())
+- {
+- return;
+- }
+- }
+- else
+- {
+- if (this->block_ == FULL_BLOCK_FAKE_ADDR)
+- this->block_ = FULL_BLOCK_REAL_ADDR;
+-
+- this->block_type_ = 0;
+- if (search_in_bitblock())
+- {
+- return;
+- }
+- }
+-
+- } // for j
+-
+- } // for i
+-
+- this->invalidate();
+- }
+-
+- /// advance iterator forward by one
+- void advance() { this->go_up(); }
+-
+-
+ /*! \brief Advance enumerator to the next available bit */
+- enumerator& go_up()
+- {
+- BM_ASSERT(this->valid());
+- BM_ASSERT_THROW(this->valid(), BM_ERR_RANGE);
++ bool go_up() BMNOEXCEPT;
+
+- // Current block search.
+- //
+-
+- block_descr_type* bdescr = &(this->bdescr_);
+- switch (this->block_type_)
+- {
+- case 0: // BitBlock
+- {
+- // check if we can get the value from the bits traversal cache
+- unsigned short idx = ++(bdescr->bit_.idx);
+- if (idx < bdescr->bit_.cnt)
+- {
+- this->position_ = bdescr->bit_.pos + bdescr->bit_.bits[idx];
+- return *this;
+- }
+- this->position_ +=
+- (bm::set_bitscan_wave_size * 32) - bdescr->bit_.bits[--idx];
+-
+- bdescr->bit_.ptr += bm::set_bitscan_wave_size;
+- if (decode_bit_group(bdescr))
+- {
+- return *this;
+- }
+- }
+- break;
+- case 1: // DGAP Block
+- {
+- ++this->position_;
+- if (--(bdescr->gap_.gap_len))
+- {
+- return *this;
+- }
+-
+- // next gap is "OFF" by definition.
+- if (*(bdescr->gap_.ptr) == bm::gap_max_bits - 1)
+- {
+- break;
+- }
+- gap_word_t prev = *(bdescr->gap_.ptr);
+- unsigned int val = *(++(bdescr->gap_.ptr));
+-
+- this->position_ += val - prev;
+- // next gap is now "ON"
+- if (*(bdescr->gap_.ptr) == bm::gap_max_bits - 1)
+- {
+- break;
+- }
+- prev = *(bdescr->gap_.ptr);
+- val = *(++(bdescr->gap_.ptr));
+- bdescr->gap_.gap_len = (gap_word_t)(val - prev);
+- return *this; // next "ON" found;
+- }
+- default:
+- BM_ASSERT(0);
+-
+- } // switch
+-
+- if (search_in_blocks())
+- return *this;
+-
+- this->invalidate();
+- return *this;
+- }
+-
+ /*!
+ @brief Skip to specified relative rank
+- @param rank - number of ON bits to go for
++ @param rank - number of ON bits to go for (must be: > 0)
++ @return true if skip was successfull and enumerator is valid
+ */
+- enumerator& skip_to_rank(size_type rank)
++ bool skip_to_rank(size_type rank) BMNOEXCEPT
+ {
++ BM_ASSERT(rank);
+ --rank;
+ if (!rank)
+- return *this;
++ return this->valid();
+ return skip(rank);
+ }
+
+@@ -802,329 +695,26 @@
+ /*!
+ @brief Skip specified number of bits from enumeration
+ @param rank - number of ON bits to skip
++ @return true if skip was successfull and enumerator is valid
+ */
+- enumerator& skip(size_type rank)
+- {
+- if (!this->valid() || !rank)
+- return *this;
+- for (; rank; --rank)
+- {
+- block_descr_type* bdescr = &(this->bdescr_);
+- switch (this->block_type_)
+- {
+- case 0: // BitBlock
+- for (; rank; --rank)
+- {
+- unsigned short idx = ++(bdescr->bit_.idx);
+- if (idx < bdescr->bit_.cnt)
+- {
+- this->position_ = bdescr->bit_.pos + bdescr->bit_.bits[idx];
+- continue;
+- }
+- this->position_ +=
+- (bm::set_bitscan_wave_size * 32) - bdescr->bit_.bits[--idx];
+- bdescr->bit_.ptr += bm::set_bitscan_wave_size;
+-
+- if (!decode_bit_group(bdescr, rank))
+- break;
+- } // for rank
+- break;
+- case 1: // DGAP Block
+- for (; rank; --rank) // TODO: better skip logic
+- {
+- ++this->position_;
+- if (--(bdescr->gap_.gap_len))
+- {
+- continue;
+- }
++ bool skip(size_type rank) BMNOEXCEPT;
+
+- // next gap is "OFF" by definition.
+- if (*(bdescr->gap_.ptr) == bm::gap_max_bits - 1)
+- {
+- break;
+- }
+- gap_word_t prev = *(bdescr->gap_.ptr);
+- unsigned int val = *(++(bdescr->gap_.ptr));
+-
+- this->position_ += val - prev;
+- // next gap is now "ON"
+- if (*(bdescr->gap_.ptr) == bm::gap_max_bits - 1)
+- {
+- break;
+- }
+- prev = *(bdescr->gap_.ptr);
+- val = *(++(bdescr->gap_.ptr));
+- bdescr->gap_.gap_len = (gap_word_t)(val - prev);
+- } // for rank
+- break;
+- default:
+- BM_ASSERT(0);
+- } // switch
+-
+- if (!rank)
+- return *this;
+-
+- if (!search_in_blocks())
+- {
+- this->invalidate();
+- return *this;
+- }
+- } // for rank
+- return *this;
+- }
+-
+ /*!
+ @brief go to a specific position in the bit-vector (or next)
+ */
+- enumerator& go_to(size_type pos)
+- {
+- if (pos == 0)
+- {
+- go_first();
+- return *this;
+- }
++ bool go_to(size_type pos) BMNOEXCEPT;
+
+- size_type new_pos = this->bv_->check_or_next(pos); // find the true pos
+- if (new_pos == 0) // no bits available
+- {
+- this->invalidate();
+- return *this;
+- }
+- BM_ASSERT(new_pos >= pos);
+- pos = new_pos;
+-
+-
+- this->position_ = pos;
+- size_type nb = this->block_idx_ = (pos >> bm::set_block_shift);
+- bm::bvector<Alloc>::blocks_manager_type& bman =
+- this->bv_->get_blocks_manager();
+- unsigned i0, j0;
+- bm::get_block_coord(nb, i0, j0);
+- this->block_ = bman.get_block(i0, j0);
+-
+- BM_ASSERT(this->block_);
+-
+- this->block_type_ = (bool)BM_IS_GAP(this->block_);
+-
+- block_descr_type* bdescr = &(this->bdescr_);
+- unsigned nbit = unsigned(pos & bm::set_block_mask);
+-
+- if (this->block_type_) // gap
+- {
+- this->position_ = nb * bm::set_block_size * 32;
+- search_in_gapblock();
+-
+- if (this->position_ == pos)
+- return *this;
+- this->position_ = pos;
+-
+- gap_word_t* gptr = BMGAP_PTR(this->block_);
+- unsigned is_set;
+- unsigned gpos = bm::gap_bfind(gptr, nbit, &is_set);
+- BM_ASSERT(is_set);
+-
+- bdescr->gap_.ptr = gptr + gpos;
+- if (gpos == 1)
+- {
+- bdescr->gap_.gap_len = bm::gap_word_t(gptr[gpos] - (nbit - 1));
+- }
+- else
+- {
+- bm::gap_word_t interval = bm::gap_word_t(gptr[gpos] - gptr[gpos - 1]);
+- bm::gap_word_t interval2 = bm::gap_word_t(nbit - gptr[gpos - 1]);
+- bdescr->gap_.gap_len = bm::gap_word_t(interval - interval2 + 1);
+- }
+- }
+- else // bit
+- {
+- if (nbit == 0)
+- {
+- search_in_bitblock();
+- return *this;
+- }
+-
+- unsigned nword = unsigned(nbit >> bm::set_word_shift);
+-
+- // check if we need to step back to match the wave
+- unsigned parity = nword % bm::set_bitscan_wave_size;
+- bdescr->bit_.ptr = this->block_ + (nword - parity);
+- bdescr->bit_.cnt = bm::bitscan_wave(bdescr->bit_.ptr, bdescr->bit_.bits);
+- BM_ASSERT(bdescr->bit_.cnt);
+- bdescr->bit_.pos = (nb * bm::set_block_size * 32) + ((nword - parity) * 32);
+- bdescr->bit_.idx = 0;
+- nbit &= bm::set_word_mask;
+- nbit += 32 * parity;
+- for (unsigned i = 0; i < bdescr->bit_.cnt; ++i)
+- {
+- if (bdescr->bit_.bits[i] == nbit)
+- return *this;
+- bdescr->bit_.idx++;
+- } // for
+- BM_ASSERT(0);
+- }
+- return *this;
+- }
+-
+-
+ private:
+ typedef typename iterator_base::block_descr block_descr_type;
+
+- bool decode_wave(block_descr_type* bdescr)
+- {
+- bdescr->bit_.cnt = bm::bitscan_wave(bdescr->bit_.ptr, bdescr->bit_.bits);
+- if (bdescr->bit_.cnt) // found
+- {
+- bdescr->bit_.idx ^= bdescr->bit_.idx; // = 0;
+- bdescr->bit_.pos = this->position_;
+- this->position_ += bdescr->bit_.bits[0];
+- return true;
+- }
+- return false;
+- }
+-
+- bool decode_bit_group(block_descr_type* bdescr)
+- {
+- const word_t* block_end = this->block_ + bm::set_block_size;
+- for (; bdescr->bit_.ptr < block_end;)
+- {
+- if (decode_wave(bdescr))
+- return true;
+- this->position_ += bm::set_bitscan_wave_size * 32; // wave size
+- bdescr->bit_.ptr += bm::set_bitscan_wave_size;
+- } // for
+- return false;
+- }
+-
+- bool decode_bit_group(block_descr_type* bdescr, size_type& rank)
+- {
+- const word_t* block_end = this->block_ + bm::set_block_size;
+-
+- for (; bdescr->bit_.ptr < block_end;)
+- {
+- const bm::id64_t* w64_p = (bm::id64_t*)bdescr->bit_.ptr;
+- bm::id64_t w64 = *w64_p;
+- unsigned cnt = bm::word_bitcount64(w64);
+- if (rank > cnt)
+- {
+- rank -= cnt;
+- }
+- else
+- {
+- if (decode_wave(bdescr))
+- return true;
+- }
+- this->position_ += bm::set_bitscan_wave_size * 32; // wave size
+- bdescr->bit_.ptr += bm::set_bitscan_wave_size;
+- } // for
+- return false;
+- }
++ static bool decode_wave(block_descr_type* bdescr) BMNOEXCEPT;
++ bool decode_bit_group(block_descr_type* bdescr) BMNOEXCEPT;
++ bool decode_bit_group(block_descr_type* bdescr,
++ size_type& rank) BMNOEXCEPT;
++ bool search_in_bitblock() BMNOEXCEPT;
++ bool search_in_gapblock() BMNOEXCEPT;
++ bool search_in_blocks() BMNOEXCEPT;
+
+- bool search_in_bitblock()
+- {
+- BM_ASSERT(this->block_type_ == 0);
+-
+- block_descr_type* bdescr = &(this->bdescr_);
+- bdescr->bit_.ptr = this->block_;
+-
+- return decode_bit_group(bdescr);
+- }
+-
+- bool search_in_gapblock()
+- {
+- BM_ASSERT(this->block_type_ == 1);
+-
+- block_descr_type* bdescr = &(this->bdescr_);
+- bdescr->gap_.ptr = BMGAP_PTR(this->block_);
+- unsigned bitval = *(bdescr->gap_.ptr) & 1;
+-
+- ++(bdescr->gap_.ptr);
+-
+- for (;true;)
+- {
+- unsigned val = *(bdescr->gap_.ptr);
+- if (bitval)
+- {
+- gap_word_t* first = BMGAP_PTR(this->block_) + 1;
+- if (bdescr->gap_.ptr == first)
+- {
+- bdescr->gap_.gap_len = (gap_word_t)(val + 1);
+- }
+- else
+- {
+- bdescr->gap_.gap_len =
+- (gap_word_t)(val - *(bdescr->gap_.ptr-1));
+- }
+- return true;
+- }
+- this->position_ += val + 1;
+- if (val == bm::gap_max_bits - 1)
+- break;
+- bitval ^= 1;
+- ++(bdescr->gap_.ptr);
+- }
+- return false;
+- }
+-
+- bool search_in_blocks()
+- {
+- ++(this->block_idx_);
+- const blocks_manager_type& bman = this->bv_->blockman_;
+- block_idx_type i = this->block_idx_ >> bm::set_array_shift;
+- block_idx_type top_block_size = bman.top_block_size();
+- bm::word_t*** blk_root = bman.top_blocks_root();
+- for (; i < top_block_size; ++i)
+- {
+- bm::word_t** blk_blk = blk_root[i];
+- if (blk_blk == 0)
+- {
+- // fast scan fwd in top level
+- size_type bn = this->block_idx_ + bm::set_sub_array_size;
+- size_type pos = this->position_ + bm::bits_in_array;
+- for (++i; i < top_block_size; ++i)
+- {
+- if (blk_root[i])
+- break;
+- bn += bm::set_sub_array_size;
+- pos += bm::bits_in_array;
+- } // for i
+- this->block_idx_ = bn;
+- this->position_ = pos;
+- if ((i < top_block_size) && blk_root[i])
+- --i;
+- continue;
+- }
+- if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
+- blk_blk = FULL_SUB_BLOCK_REAL_ADDR;
+-
+- block_idx_type j = this->block_idx_ & bm::set_array_mask;
+-
+- for(; j < bm::set_sub_array_size; ++j, ++(this->block_idx_))
+- {
+- this->block_ = blk_blk[j];
+-
+- if (this->block_ == 0)
+- {
+- this->position_ += bm::bits_in_block;
+- continue;
+- }
+-
+- this->block_type_ = BM_IS_GAP(this->block_);
+- if (this->block_type_)
+- {
+- if (search_in_gapblock())
+- return true;
+- }
+- else
+- {
+- if (this->block_ == FULL_BLOCK_FAKE_ADDR)
+- this->block_ = FULL_BLOCK_REAL_ADDR;
+- if (search_in_bitblock())
+- return true;
+- }
+- } // for j
+- } // for i
+- return false;
+- }
+ };
+
+ /*!
+@@ -1142,15 +732,14 @@
+ #ifndef BM_NO_STL
+ typedef std::input_iterator_tag iterator_category;
+ #endif
+- counted_enumerator() : bit_count_(0){}
++ counted_enumerator() BMNOEXCEPT : bit_count_(0){}
+
+- counted_enumerator(const enumerator& en) : enumerator(en)
++ counted_enumerator(const enumerator& en) BMNOEXCEPT : enumerator(en)
+ {
+- if (this->valid())
+- bit_count_ = 1;
++ bit_count_ = this->valid(); // 0 || 1
+ }
+
+- counted_enumerator& operator=(const enumerator& en)
++ counted_enumerator& operator=(const enumerator& en) BMNOEXCEPT
+ {
+ enumerator* me = this;
+ *me = en;
+@@ -1159,11 +748,10 @@
+ return *this;
+ }
+
+- counted_enumerator& operator++()
++ counted_enumerator& operator++() BMNOEXCEPT
+ {
+ this->go_up();
+- if (this->valid())
+- ++(this->bit_count_);
++ this->bit_count_ += this->valid();
+ return *this;
+ }
+
+@@ -1171,8 +759,7 @@
+ {
+ counted_enumerator tmp(*this);
+ this->go_up();
+- if (this->valid())
+- ++bit_count_;
++ this->bit_count_ += this->valid();
+ return tmp;
+ }
+
+@@ -1181,7 +768,7 @@
+ Method returns number of ON bits fromn the bit 0 to the current bit
+ For the first bit in bitvector it is 1, for the second 2
+ */
+- size_type count() const { return bit_count_; }
++ size_type count() const BMNOEXCEPT { return bit_count_; }
+ private:
+ /*! Function closed for usage */
+ counted_enumerator& go_to(size_type pos);
+@@ -1198,10 +785,10 @@
+ class mem_pool_guard
+ {
+ public:
+- mem_pool_guard() : bv_(0)
++ mem_pool_guard() BMNOEXCEPT : bv_(0)
+ {}
+
+- mem_pool_guard(allocator_pool_type& pool, bvector<Alloc>& bv)
++ mem_pool_guard(allocator_pool_type& pool, bvector<Alloc>& bv) BMNOEXCEPT
+ : bv_(&bv)
+ {
+ bv.set_allocator_pool(&pool);
+@@ -1213,13 +800,14 @@
+ }
+
+ /// check if vector has no assigned allocator and set one
+- void assign_if_not_set(allocator_pool_type& pool, bvector<Alloc>& bv)
++ void assign_if_not_set(allocator_pool_type& pool,
++ bvector<Alloc>& bv) BMNOEXCEPT
+ {
+- if (bv.get_allocator_pool() == 0) // alloc pool not set yet
++ if (!bv.get_allocator_pool()) // alloc pool not set yet
+ {
+ BM_ASSERT(!bv_);
+ bv_ = &bv;
+- bv.set_allocator_pool(&pool);
++ bv_->set_allocator_pool(&pool);
+ }
+ }
+
+@@ -1248,7 +836,7 @@
+ const gap_word_t* glevel_len;
+
+ allocation_policy(bm::strategy s=BM_BIT,
+- const gap_word_t* glevels = bm::gap_len_table<true>::_len)
++ const gap_word_t* glevels = bm::gap_len_table<true>::_len) BMNOEXCEPT
+ : strat(s), glevel_len(glevels)
+ {}
+ };
+@@ -1329,7 +917,7 @@
+ }
+
+
+- ~bvector() BMNOEXEPT {}
++ ~bvector() BMNOEXCEPT {}
+ /*!
+ \brief Explicit post-construction initialization
+ */
+@@ -1353,7 +941,7 @@
+ /*!
+ \brief Move constructor
+ */
+- bvector(bvector<Alloc>&& bvect) BMNOEXEPT
++ bvector(bvector<Alloc>&& bvect) BMNOEXCEPT
+ {
+ blockman_.move_from(bvect.blockman_);
+ size_ = bvect.size_;
+@@ -1380,7 +968,7 @@
+ /*!
+ \brief Move assignment operator
+ */
+- bvector& operator=(bvector<Alloc>&& bvect) BMNOEXEPT
++ bvector& operator=(bvector<Alloc>&& bvect) BMNOEXCEPT
+ {
+ this->move_from(bvect);
+ return *this;
+@@ -1389,11 +977,11 @@
+ /*!
+ \brief Move bvector content from another bvector
+ */
+- void move_from(bvector<Alloc>& bvect) BMNOEXEPT;
++ void move_from(bvector<Alloc>& bvect) BMNOEXCEPT;
+
+ /*! \brief Exchanges content of bv and this bvector.
+ */
+- void swap(bvector<Alloc>& bvect) BMNOEXEPT;
++ void swap(bvector<Alloc>& bvect) BMNOEXCEPT;
+
+ /*! \brief Merge/move content from another vector
+
+@@ -1419,7 +1007,7 @@
+ return reference(*this, n);
+ }
+
+- bool operator[](size_type n) const
++ bool operator[](size_type n) const BMNOEXCEPT
+ {
+ BM_ASSERT(n < size_);
+ return get_bit(n);
+@@ -1434,25 +1022,23 @@
+ bool operator <= (const bvector<Alloc>& bv) const { return compare(bv)<=0; }
+ bool operator > (const bvector<Alloc>& bv) const { return compare(bv)>0; }
+ bool operator >= (const bvector<Alloc>& bv) const { return compare(bv) >= 0; }
+- bool operator == (const bvector<Alloc>& bv) const { return equal(bv); }
+- bool operator != (const bvector<Alloc>& bv) const { return !equal(bv); }
++ bool operator == (const bvector<Alloc>& bv) const BMNOEXCEPT { return equal(bv); }
++ bool operator != (const bvector<Alloc>& bv) const BMNOEXCEPT { return !equal(bv); }
+
+ bvector<Alloc> operator~() const { return bvector<Alloc>(*this).invert(); }
+
+ Alloc get_allocator() const
+- {
+- return blockman_.get_allocator();
+- }
++ { return blockman_.get_allocator(); }
+
+- /// Set allocator pool for local (non-threaded)
++ /// Set allocator pool for local (non-th readed)
+ /// memory cyclic(lots of alloc-free ops) opertations
+ ///
+- void set_allocator_pool(allocator_pool_type* pool_ptr)
++ void set_allocator_pool(allocator_pool_type* pool_ptr) BMNOEXCEPT
+ { blockman_.get_allocator().set_pool(pool_ptr); }
+
+ /// Get curent allocator pool (if set)
+ /// @return pointer to the current pool or NULL
+- allocator_pool_type* get_allocator_pool()
++ allocator_pool_type* get_allocator_pool() BMNOEXCEPT
+ { return blockman_.get_allocator().get_pool(); }
+
+ // --------------------------------------------------------------------
+@@ -1567,6 +1153,10 @@
+ */
+ void set_bit_no_check(size_type n);
+
++ /**
++ \brief Set specified bit without checking preconditions (size, etc)
++ */
++ bool set_bit_no_check(size_type n, bool val);
+
+ /*!
+ \brief Sets all bits in the specified closed interval [left,right]
+@@ -1596,9 +1186,7 @@
+ @sa set_range
+ */
+ void clear_range(size_type left, size_type right)
+- {
+- set_range(left, right, false);
+- }
++ { set_range(left, right, false); }
+
+
+ /*!
+@@ -1642,20 +1230,13 @@
+ \param free_mem if "true" (default) bvector frees the memory,
+ otherwise sets blocks to 0.
+ */
+- void clear(bool free_mem = false)
+- {
+- blockman_.set_all_zero(free_mem);
+- }
++ void clear(bool free_mem = false) { blockman_.set_all_zero(free_mem); }
+
+ /*!
+ \brief Clears every bit in the bitvector.
+ \return *this;
+ */
+- bvector<Alloc>& reset()
+- {
+- clear(true);
+- return *this;
+- }
++ bvector<Alloc>& reset() { clear(true); return *this; }
+
+ /*!
+ \brief Flips bit n
+@@ -1688,7 +1269,7 @@
+ //size_type capacity() const { return blockman_.capacity(); }
+
+ /*! \brief return current size of the vector (bits) */
+- size_type size() const { return size_; }
++ size_type size() const BMNOEXCEPT { return size_; }
+
+ /*!
+ \brief Change size of the bvector
+@@ -1699,15 +1280,16 @@
+ //@}
+ // --------------------------------------------------------------------
+
+- /*! @name Population counting and ranking methods
++ /*! @name Population counting, ranks, ranges and intervals
+ */
+ //@{
+
+ /*!
+ \brief population cout (count of ON bits)
+- \return Total number of bits ON.
++ \sa count_range
++ \return Total number of bits ON
+ */
+- size_type count() const;
++ size_type count() const BMNOEXCEPT;
+
+ /*! \brief Computes bitcount values for all bvector blocks
+ \param arr - pointer on array of block bit counts
+@@ -1715,8 +1297,9 @@
+ This number +1 gives you number of arr elements initialized during the
+ function call.
+ */
+- block_idx_type count_blocks(unsigned* arr) const;
+-
++ block_idx_type count_blocks(unsigned* arr) const BMNOEXCEPT;
++
++
+ /*!
+ \brief Returns count of 1 bits in the given range [left..right]
+ Uses rank-select index to accelerate the search
+@@ -1729,7 +1312,7 @@
+ */
+ size_type count_range(size_type left,
+ size_type right,
+- const rs_index_type& rs_idx) const;
++ const rs_index_type& rs_idx) const BMNOEXCEPT;
+
+ /*!
+ \brief Returns count of 1 bits in the given range [left..right]
+@@ -1739,11 +1322,33 @@
+
+ \return population count in the diapason
+ */
+- size_type count_range(size_type left,
+- size_type right) const;
++ size_type count_range(size_type left, size_type right) const BMNOEXCEPT;
+
+-
++ /*!
++ \brief Returns true if all bits in the range are 1s (saturated interval)
++ Function uses closed interval [left, right]
+
++ \param left - index of first bit start checking
++ \param right - index of last bit
++
++ \return true if all bits are 1, false otherwise
++ @sa any_range, count_range
++ */
++ bool is_all_one_range(size_type left, size_type right) const BMNOEXCEPT;
++
++ /*!
++ \brief Returns true if any bits in the range are 1s (non-empty interval)
++ Function uses closed interval [left, right]
++
++ \param left - index of first bit start checking
++ \param right - index of last bit
++
++ \return true if at least 1 bits is set
++ @sa is_all_one_range, count_range
++ */
++ bool any_range(size_type left, size_type right) const BMNOEXCEPT;
++
++
+ /*! \brief compute running total of all blocks in bit vector (rank-select index)
+ \param rs_idx - [out] pointer to index / count structure
+ \param bv_blocks - [out] list of block ids in the vector (internal, optional)
+@@ -1762,24 +1367,42 @@
+ should be prepared using build_rs_index
+ \return population count in the range [0..n]
+ \sa build_rs_index
+- \sa count_to_test, select, rank
++ \sa count_to_test, select, rank, rank_corrected
+ */
+- size_type count_to(size_type n, const rs_index_type& rs_idx) const;
++ size_type count_to(size_type n,
++ const rs_index_type& rs_idx) const BMNOEXCEPT;
+
+
+ /*!
+- \brief Returns rank of specified bit position
++ \brief Returns rank of specified bit position (same as count_to())
+
+ \param n - index of bit to rank
+ \param rs_idx - rank-select index
+ \return population count in the range [0..n]
+ \sa build_rs_index
+- \sa count_to_test, select, rank
++ \sa count_to_test, select, rank, rank_corrected
+ */
+- size_type rank(size_type n, const rs_index_type& rs_idx) const
+- { return count_to(n, rs_idx); }
++ size_type rank(size_type n,
++ const rs_index_type& rs_idx) const BMNOEXCEPT
++ { return count_to(n, rs_idx); }
+
++ /*!
++ \brief Returns rank corrceted by the requested border value (as -1)
+
++ This is rank function (bit-count) minus value of bit 'n'
++ if bit-n is true function returns rank()-1 if false returns rank()
++ faster than rank() + test().
++
++
++ \param n - index of bit to rank
++ \param rs_idx - rank-select index
++ \return population count in the range [0..n] corrected as -1 by the value of n
++ \sa build_rs_index
++ \sa count_to_test, select, rank
++ */
++ size_type rank_corrected(size_type n,
++ const rs_index_type& rs_idx) const BMNOEXCEPT;
++
+ /*!
+ \brief popcount in [0..right] range if test(right) == true
+
+@@ -1787,8 +1410,9 @@
+ plus count_to()
+
+ \param n - index of bit to test and rank
+- \param blocks_cnt - block count structure to accelerate search
+- should be prepared using running_count_blocks
++ \param rs_idx - rank-select index
++ (block count structure to accelerate search)
++ should be prepared using build_rs_index()
+
+ \return population count in the diapason or 0 if right bit test failed
+
+@@ -1795,17 +1419,19 @@
+ \sa build_rs_index
+ \sa count_to
+ */
+- size_type count_to_test(size_type n, const rs_index_type& blocks_cnt) const;
++ size_type
++ count_to_test(size_type n,
++ const rs_index_type& rs_idx) const BMNOEXCEPT;
+
+
+ /*! Recalculate bitcount (deprecated)
+ */
+- size_type recalc_count() { return count(); }
++ size_type recalc_count() BMNOEXCEPT { return count(); }
+
+ /*!
+ Disables count cache. (deprecated).
+ */
+- void forget_count() {}
++ void forget_count() BMNOEXCEPT {}
+
+ //@}
+
+@@ -1818,7 +1444,7 @@
+ \param n - Index of the bit to check.
+ \return Bit value (1 or 0)
+ */
+- bool get_bit(size_type n) const;
++ bool get_bit(size_type n) const BMNOEXCEPT;
+
+ /*!
+ \brief returns true if bit n is set and false is bit n is 0.
+@@ -1825,7 +1451,7 @@
+ \param n - Index of the bit to check.
+ \return Bit value (1 or 0)
+ */
+- bool test(size_type n) const { return get_bit(n); }
++ bool test(size_type n) const BMNOEXCEPT { return get_bit(n); }
+
+ //@}
+
+@@ -1874,12 +1500,12 @@
+ \brief Returns true if any bits in this bitset are set, and otherwise returns false.
+ \return true if any bit is set
+ */
+- bool any() const;
++ bool any() const BMNOEXCEPT;
+
+ /*!
+ \brief Returns true if no bits are set, otherwise returns false.
+ */
+- bool none() const { return !any(); }
++ bool none() const BMNOEXCEPT { return !any(); }
+
+ //@}
+ // --------------------------------------------------------------------
+@@ -1890,22 +1516,23 @@
+ /*!
+ \fn bool bvector::find(bm::id_t& pos) const
+ \brief Finds index of first 1 bit
+- \param pos - index of the found 1 bit
++ \param pos - [out] index of the found 1 bit
+ \return true if search returned result
+ \sa get_first, get_next, extract_next, find_reverse, find_first_mismatch
+ */
+- bool find(size_type& pos) const;
++ bool find(size_type& pos) const BMNOEXCEPT;
+
+ /*!
+ \fn bool bvector::find(bm::id_t from, bm::id_t& pos) const
+- \brief Finds index of 1 bit starting from position
++ \brief Find index of 1 bit starting from position
+ \param from - position to start search from
+- \param pos - index of the found 1 bit
++ \param pos - [out] index of the found 1 bit
+ \return true if search returned result
+ \sa get_first, get_next, extract_next, find_reverse, find_first_mismatch
+ */
+- bool find(size_type from, size_type& pos) const;
++ bool find(size_type from, size_type& pos) const BMNOEXCEPT;
+
++
+ /*!
+ \fn bm::id_t bvector::get_first() const
+ \brief find first 1 bit in vector.
+@@ -1915,7 +1542,7 @@
+ \return Index of the first 1 bit, may return 0
+ \sa get_next, find, extract_next, find_reverse
+ */
+- size_type get_first() const { return check_or_next(0); }
++ size_type get_first() const BMNOEXCEPT { return check_or_next(0); }
+
+ /*!
+ \fn bm::id_t bvector::get_next(bm::id_t prev) const
+@@ -1924,7 +1551,7 @@
+ \return Index of the next bit which is ON or 0 if not found.
+ \sa get_first, find, extract_next, find_reverse
+ */
+- size_type get_next(size_type prev) const
++ size_type get_next(size_type prev) const BMNOEXCEPT
+ { return (++prev == bm::id_max) ? 0 : check_or_next(prev); }
+
+ /*!
+@@ -1945,7 +1572,7 @@
+ \return true if search returned result
+ \sa get_first, get_next, extract_next, find, find_first_mismatch
+ */
+- bool find_reverse(size_type& pos) const;
++ bool find_reverse(size_type& pos) const BMNOEXCEPT;
+
+ /*!
+ \brief Finds dynamic range of bit-vector [first, last]
+@@ -1954,7 +1581,7 @@
+ \return true if search returned result
+ \sa get_first, get_next, extract_next, find, find_reverse
+ */
+- bool find_range(size_type& first, size_type& last) const;
++ bool find_range(size_type& first, size_type& last) const BMNOEXCEPT;
+
+ /*!
+ \brief Find bit-vector position for the specified rank(bitcount)
+@@ -1969,7 +1596,8 @@
+
+ \return true if requested rank was found
+ */
+- bool find_rank(size_type rank, size_type from, size_type& pos) const;
++ bool find_rank(size_type rank, size_type from,
++ size_type& pos) const BMNOEXCEPT;
+
+ /*!
+ \brief Find bit-vector position for the specified rank(bitcount)
+@@ -1989,7 +1617,7 @@
+ \return true if requested rank was found
+ */
+ bool find_rank(size_type rank, size_type from, size_type& pos,
+- const rs_index_type& rs_idx) const;
++ const rs_index_type& rs_idx) const BMNOEXCEPT;
+
+ /*!
+ \brief select bit-vector position for the specified rank(bitcount)
+@@ -2007,7 +1635,8 @@
+
+ \return true if requested rank was found
+ */
+- bool select(size_type rank, size_type& pos, const rs_index_type& rs_idx) const;
++ bool select(size_type rank, size_type& pos,
++ const rs_index_type& rs_idx) const BMNOEXCEPT;
+
+ //@}
+
+@@ -2185,7 +1814,7 @@
+
+ @sa statistics
+ */
+- void calc_stat(struct bm::bvector<Alloc>::statistics* st) const;
++ void calc_stat(struct bm::bvector<Alloc>::statistics* st) const BMNOEXCEPT;
+
+ /*!
+ \brief Sets new blocks allocation strategy.
+@@ -2200,7 +1829,8 @@
+ 1 - Blocks mutation mode (adaptive algorithm)
+ \sa set_new_blocks_strat
+ */
+- strategy get_new_blocks_strat() const { return new_blocks_strat_; }
++ strategy get_new_blocks_strat() const BMNOEXCEPT
++ { return new_blocks_strat_; }
+
+ /*!
+ \brief Optimize memory bitvector's memory allocation.
+@@ -2239,7 +1869,7 @@
+ Return true if bvector is initialized at all
+ @internal
+ */
+- bool is_init() const { return blockman_.is_init(); }
++ bool is_init() const BMNOEXCEPT { return blockman_.is_init(); }
+
+ //@}
+
+@@ -2258,13 +1888,13 @@
+ @return 0 if this == arg, -1 if this < arg, 1 if this > arg
+ @sa find_first_mismatch
+ */
+- int compare(const bvector<Alloc>& bvect) const;
++ int compare(const bvector<Alloc>& bvect) const BMNOEXCEPT;
+
+ /*!
+ \brief Equal comparison with an agr bit-vector
+ @return true if vectors are identical
+ */
+- bool equal(const bvector<Alloc>& bvect) const
++ bool equal(const bvector<Alloc>& bvect) const BMNOEXCEPT
+ {
+ size_type pos;
+ bool found = find_first_mismatch(bvect, pos);
+@@ -2285,7 +1915,7 @@
+ bool find_first_mismatch(const bvector<Alloc>& bvect,
+ size_type& pos,
+ size_type search_to = bm::id_max
+- ) const;
++ ) const BMNOEXCEPT;
+
+ //@}
+
+@@ -2305,7 +1935,8 @@
+ Use only if you are BitMagic library
+ @internal
+ */
+- const blocks_manager_type& get_blocks_manager() const { return blockman_; }
++ const blocks_manager_type& get_blocks_manager() const BMNOEXCEPT
++ { return blockman_; }
+
+ /**
+ \brief get access to memory manager (internal)
+@@ -2312,7 +1943,8 @@
+ Use only if you are BitMagic library
+ @internal
+ */
+- blocks_manager_type& get_blocks_manager() { return blockman_; }
++ blocks_manager_type& get_blocks_manager() BMNOEXCEPT
++ { return blockman_; }
+
+ //@}
+
+@@ -2338,21 +1970,22 @@
+
+ private:
+
+- size_type check_or_next(size_type prev) const;
++ size_type check_or_next(size_type prev) const BMNOEXCEPT;
+
+- /// set bit in GAP block withlength extension control
++ /// set bit in GAP block with GAP block length control
+ bool gap_block_set(bm::gap_word_t* gap_blk,
+ bool val, block_idx_type nblock, unsigned nbit);
+-
++
++ /// set bit in GAP block with GAP block length control
++ void gap_block_set_no_ret(bm::gap_word_t* gap_blk,
++ bool val, block_idx_type nblock,
++ unsigned nbit);
++
+ /// check if specified bit is 1, and set it to 0
+ /// if specified bit is 0, scan for the next 1 and returns it
+ /// if no 1 found returns 0
+ size_type check_or_next_extract(size_type prev);
+
+- /**
+- \brief Set specified bit without checking preconditions (size, etc)
+- */
+- bool set_bit_no_check(size_type n, bool val);
+
+ /**
+ \brief AND specified bit without checking preconditions (size, etc)
+@@ -2440,11 +2073,11 @@
+ size_type block_count_to(const bm::word_t* block,
+ block_idx_type nb,
+ unsigned nbit_right,
+- const rs_index_type& blocks_cnt);
++ const rs_index_type& blocks_cnt) BMNOEXCEPT;
+ /**
+ Return value of first bit in the block
+ */
+- bool test_first_block_bit(block_idx_type nb) const;
++ bool test_first_block_bit(block_idx_type nb) const BMNOEXCEPT;
+
+ private:
+ blocks_manager_type blockman_; //!< bitblocks manager
+@@ -2510,7 +2143,7 @@
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
+-void bvector<Alloc>::move_from(bvector<Alloc>& bvect) BMNOEXEPT
++void bvector<Alloc>::move_from(bvector<Alloc>& bvect) BMNOEXCEPT
+ {
+ if (this != &bvect)
+ {
+@@ -2572,7 +2205,7 @@
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
+-typename bvector<Alloc>::size_type bvector<Alloc>::count() const
++typename bvector<Alloc>::size_type bvector<Alloc>::count() const BMNOEXCEPT
+ {
+ if (!blockman_.is_init())
+ return 0;
+@@ -2592,6 +2225,9 @@
+ if (!found)
+ break;
+ blk_blk = blk_root[i];
++ BM_ASSERT(blk_blk);
++ if (!blk_blk)
++ break;
+ }
+ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
+ {
+@@ -2619,7 +2255,7 @@
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
+-bool bvector<Alloc>::any() const
++bool bvector<Alloc>::any() const BMNOEXCEPT
+ {
+ word_t*** blk_root = blockman_.top_blocks_root();
+ if (!blk_root)
+@@ -2780,7 +2416,7 @@
+
+ template<typename Alloc>
+ typename bvector<Alloc>::block_idx_type
+-bvector<Alloc>::count_blocks(unsigned* arr) const
++bvector<Alloc>::count_blocks(unsigned* arr) const BMNOEXCEPT
+ {
+ bm::word_t*** blk_root = blockman_.top_blocks_root();
+ if (blk_root == 0)
+@@ -2797,7 +2433,7 @@
+ bvector<Alloc>::block_count_to(const bm::word_t* block,
+ block_idx_type nb,
+ unsigned nbit_right,
+- const rs_index_type& rs_idx)
++ const rs_index_type& rs_idx) BMNOEXCEPT
+ {
+ size_type c;
+ unsigned sub_range = rs_idx.find_sub_range(nbit_right);
+@@ -2909,7 +2545,7 @@
+ template<typename Alloc>
+ typename bvector<Alloc>::size_type
+ bvector<Alloc>::count_to(size_type right,
+- const rs_index_type& rs_idx) const
++ const rs_index_type& rs_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(right < bm::id_max);
+ if (!blockman_.is_init())
+@@ -2963,7 +2599,7 @@
+ template<typename Alloc>
+ typename bvector<Alloc>::size_type
+ bvector<Alloc>::count_to_test(size_type right,
+- const rs_index_type& blocks_cnt) const
++ const rs_index_type& rs_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(right < bm::id_max);
+ if (!blockman_.is_init())
+@@ -2972,15 +2608,13 @@
+ unsigned nblock_right = unsigned(right >> bm::set_block_shift);
+ unsigned nbit_right = unsigned(right & bm::set_block_mask);
+
+- // running count of all blocks before target
+- //
+- size_type cnt = 0;
+ unsigned i, j;
+ bm::get_block_coord(nblock_right, i, j);
+ const bm::word_t* block = blockman_.get_block_ptr(i, j);
+
++ size_type cnt = 0;
+ if (!block)
+- return 0;
++ return cnt;
+
+ bool gap = BM_IS_GAP(block);
+ if (gap)
+@@ -2989,7 +2623,7 @@
+ if (bm::gap_test_unr(gap_blk, (gap_word_t)nbit_right))
+ cnt = bm::gap_bit_count_to(gap_blk, (gap_word_t)nbit_right);
+ else
+- return 0;
++ return cnt;
+ }
+ else
+ {
+@@ -3004,14 +2638,16 @@
+ w &= (1u << (nbit_right & bm::set_word_mask));
+ if (w)
+ {
+- cnt = block_count_to(block, nblock_right, nbit_right, blocks_cnt);
++ cnt = block_count_to(block, nblock_right, nbit_right, rs_idx);
+ BM_ASSERT(cnt == bm::bit_block_calc_count_to(block, nbit_right));
+ }
+ else
+- return 0;
++ {
++ return cnt;
++ }
+ }
+ }
+- cnt += nblock_right ? blocks_cnt.rcount(nblock_right - 1) : 0;
++ cnt += nblock_right ? rs_idx.rcount(nblock_right - 1) : 0;
+ return cnt;
+ }
+
+@@ -3019,14 +2655,59 @@
+
+ template<typename Alloc>
+ typename bvector<Alloc>::size_type
+-bvector<Alloc>::count_range(size_type left, size_type right) const
++bvector<Alloc>::rank_corrected(size_type right,
++ const rs_index_type& rs_idx) const BMNOEXCEPT
+ {
++ BM_ASSERT(right < bm::id_max);
++ if (!blockman_.is_init())
++ return 0;
++
++ unsigned nblock_right = unsigned(right >> bm::set_block_shift);
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++
++ size_type cnt = nblock_right ? rs_idx.rcount(nblock_right - 1) : 0;
++
++ unsigned i, j;
++ bm::get_block_coord(nblock_right, i, j);
++ const bm::word_t* block = blockman_.get_block_ptr(i, j);
++
++ if (!block)
++ return cnt;
++
++ bool gap = BM_IS_GAP(block);
++ if (gap)
++ {
++ cnt += bm::gap_bit_count_to(BMGAP_PTR(block), (gap_word_t)nbit_right,
++ true /* rank corrected */);
++ }
++ else
++ {
++ if (block == FULL_BLOCK_FAKE_ADDR)
++ cnt += nbit_right;
++ else
++ {
++ cnt += block_count_to(block, nblock_right, nbit_right, rs_idx);
++ unsigned w = block[nbit_right >> bm::set_word_shift] &
++ (1u << (nbit_right & bm::set_word_mask));
++ cnt -= bool(w); // rank correction
++ }
++ }
++ return cnt;
++}
++
++
++// -----------------------------------------------------------------------
++
++template<typename Alloc>
++typename bvector<Alloc>::size_type
++bvector<Alloc>::count_range(size_type left, size_type right) const BMNOEXCEPT
++{
+ BM_ASSERT(left < bm::id_max && right < bm::id_max);
+- BM_ASSERT(left <= right);
++ if (left > right)
++ bm::xor_swap(left, right);
++ if (right == bm::id_max)
++ --right;
+
+- BM_ASSERT_THROW(right < bm::id_max, BM_ERR_RANGE);
+- BM_ASSERT_THROW(left <= right, BM_ERR_RANGE);
+-
+ if (!blockman_.is_init())
+ return 0;
+
+@@ -3033,8 +2714,8 @@
+ size_type cnt = 0;
+
+ // calculate logical number of start and destination blocks
+- unsigned nblock_left = unsigned(left >> bm::set_block_shift);
+- unsigned nblock_right = unsigned(right >> bm::set_block_shift);
++ block_idx_type nblock_left = (left >> bm::set_block_shift);
++ block_idx_type nblock_right = (right >> bm::set_block_shift);
+
+ unsigned i0, j0;
+ bm::get_block_coord(nblock_left, i0, j0);
+@@ -3076,13 +2757,15 @@
+ {
+ return cnt;
+ }
+-
++
++ // process all full mid-blocks
+ {
+ func.reset();
+ word_t*** blk_root = blockman_.top_blocks_root();
+- unsigned top_blocks_size = blockman_.top_block_size();
++ block_idx_type top_blocks_size = blockman_.top_block_size();
+
+- bm::for_each_nzblock_range(blk_root, top_blocks_size, nblock_left+1, nblock_right-1, func);
++ bm::for_each_nzblock_range(blk_root, top_blocks_size,
++ nblock_left+1, nblock_right-1, func);
+ cnt += func.count();
+ }
+
+@@ -3106,19 +2789,197 @@
+ return cnt;
+ }
+
++// -----------------------------------------------------------------------
+
++template<typename Alloc>
++bool bvector<Alloc>::is_all_one_range(size_type left,
++ size_type right) const BMNOEXCEPT
++{
++ if (!blockman_.is_init())
++ return false; // nothing to do
++
++ if (right < left)
++ bm::xor_swap(left, right);
++ if (right == bm::id_max)
++ --right;
++ if (left == right)
++ return test(left);
++
++ BM_ASSERT(left < bm::id_max && right < bm::id_max);
++
++ block_idx_type nblock_left = (left >> bm::set_block_shift);
++ block_idx_type nblock_right = (right >> bm::set_block_shift);
++
++ unsigned i0, j0;
++ bm::get_block_coord(nblock_left, i0, j0);
++ const bm::word_t* block = blockman_.get_block(i0, j0);
++
++ if (nblock_left == nblock_right) // hit in the same block
++ {
++ unsigned nbit_left = unsigned(left & bm::set_block_mask);
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++ return bm::block_is_all_one_range(block, nbit_left, nbit_right);
++ }
++
++ // process entry point block
++ {
++ unsigned nbit_left = unsigned(left & bm::set_block_mask);
++ bool all_one = bm::block_is_all_one_range(block,
++ nbit_left, (bm::gap_max_bits-1));
++ if (!all_one)
++ return all_one;
++ ++nblock_left;
++ }
++
++ // process tail block
++ {
++ bm::get_block_coord(nblock_right, i0, j0);
++ block = blockman_.get_block(i0, j0);
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++ bool all_one = bm::block_is_all_one_range(block, 0, nbit_right);
++ if (!all_one)
++ return all_one;
++ --nblock_right;
++ }
++
++ // check all blocks in the middle
++ //
++ if (nblock_left <= nblock_right)
++ {
++ unsigned i_from, j_from, i_to, j_to;
++ bm::get_block_coord(nblock_left, i_from, j_from);
++ bm::get_block_coord(nblock_right, i_to, j_to);
++
++ bm::word_t*** blk_root = blockman_.top_blocks_root();
++
++ for (unsigned i = i_from; i <= i_to; ++i)
++ {
++ bm::word_t** blk_blk = blk_root[i];
++ if (!blk_blk)
++ return false;
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ continue;
++
++ unsigned j = (i == i_from) ? j_from : 0;
++ unsigned j_limit = (i == i_to) ? j_to+1 : bm::set_sub_array_size;
++ do
++ {
++ bool all_one = bm::check_block_one(blk_blk[j], true);
++ if (!all_one)
++ return all_one;
++ } while (++j < j_limit);
++ } // for i
++ }
++ return true;
++}
++
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
++bool bvector<Alloc>::any_range(size_type left, size_type right) const BMNOEXCEPT
++{
++ BM_ASSERT(left < bm::id_max && right < bm::id_max);
++
++ if (!blockman_.is_init())
++ return false; // nothing to do
++
++ if (right < left)
++ bm::xor_swap(left, right);
++ if (right == bm::id_max)
++ --right;
++ if (left == right)
++ return test(left);
++
++ block_idx_type nblock_left = (left >> bm::set_block_shift);
++ block_idx_type nblock_right = (right >> bm::set_block_shift);
++
++ unsigned i0, j0;
++ bm::get_block_coord(nblock_left, i0, j0);
++ const bm::word_t* block = blockman_.get_block(i0, j0);
++
++ if (nblock_left == nblock_right) // hit in the same block
++ {
++ unsigned nbit_left = unsigned(left & bm::set_block_mask);
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++ return bm::block_any_range(block, nbit_left, nbit_right);
++ }
++
++ // process entry point block
++ {
++ unsigned nbit_left = unsigned(left & bm::set_block_mask);
++ bool any_one = bm::block_any_range(block,
++ nbit_left, (bm::gap_max_bits-1));
++ if (any_one)
++ return any_one;
++ ++nblock_left;
++ }
++
++ // process tail block
++ {
++ bm::get_block_coord(nblock_right, i0, j0);
++ block = blockman_.get_block(i0, j0);
++ unsigned nbit_right = unsigned(right & bm::set_block_mask);
++ bool any_one = bm::block_any_range(block, 0, nbit_right);
++ if (any_one)
++ return any_one;
++ --nblock_right;
++ }
++
++ // check all blocks in the middle
++ //
++ if (nblock_left <= nblock_right)
++ {
++ unsigned i_from, j_from, i_to, j_to;
++ bm::get_block_coord(nblock_left, i_from, j_from);
++ bm::get_block_coord(nblock_right, i_to, j_to);
++
++ bm::word_t*** blk_root = blockman_.top_blocks_root();
++ {
++ block_idx_type top_size = blockman_.top_block_size();
++ if (i_from >= top_size)
++ return false;
++ if (i_to >= top_size)
++ {
++ i_to = unsigned(top_size-1);
++ j_to = bm::set_sub_array_size-1;
++ }
++ }
++
++ for (unsigned i = i_from; i <= i_to; ++i)
++ {
++ bm::word_t** blk_blk = blk_root[i];
++ if (!blk_blk)
++ continue;
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ return true;
++
++ unsigned j = (i == i_from) ? j_from : 0;
++ unsigned j_limit = (i == i_to) ? j_to+1 : bm::set_sub_array_size;
++ do
++ {
++ bool any_one = bm::block_any(blk_blk[j]);
++ if (any_one)
++ return any_one;
++ } while (++j < j_limit);
++ } // for i
++ }
++ return false;
++}
++
++// -----------------------------------------------------------------------
++
++template<typename Alloc>
+ typename bvector<Alloc>::size_type
+ bvector<Alloc>::count_range(size_type left,
+ size_type right,
+- const rs_index_type& rs_idx) const
++ const rs_index_type& rs_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT(left <= right);
+
++ if (left > right)
++ bm::xor_swap(left, right);
++
+ BM_ASSERT_THROW(right < bm::id_max, BM_ERR_RANGE);
+- BM_ASSERT_THROW(left <= right, BM_ERR_RANGE);
+
+ if (left == right)
+ return this->test(left);
+@@ -3187,7 +3048,7 @@
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
+-bool bvector<Alloc>::get_bit(size_type n) const
++bool bvector<Alloc>::get_bit(size_type n) const BMNOEXCEPT
+ {
+ BM_ASSERT(n < size_);
+ BM_ASSERT_THROW((n < size_), BM_ERR_RANGE);
+@@ -3309,7 +3170,7 @@
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
+-int bvector<Alloc>::compare(const bvector<Alloc>& bv) const
++int bvector<Alloc>::compare(const bvector<Alloc>& bv) const BMNOEXCEPT
+ {
+ int res;
+ unsigned top_blocks = blockman_.top_block_size();
+@@ -3429,7 +3290,7 @@
+ template<typename Alloc>
+ bool bvector<Alloc>::find_first_mismatch(
+ const bvector<Alloc>& bvect, size_type& pos,
+- size_type search_to) const
++ size_type search_to) const BMNOEXCEPT
+ {
+ unsigned top_blocks = blockman_.top_block_size();
+ bm::word_t*** top_root = blockman_.top_blocks_root();
+@@ -3531,7 +3392,7 @@
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
+-void bvector<Alloc>::swap(bvector<Alloc>& bvect) BMNOEXEPT
++void bvector<Alloc>::swap(bvector<Alloc>& bvect) BMNOEXCEPT
+ {
+ if (this != &bvect)
+ {
+@@ -3543,7 +3404,8 @@
+ // -----------------------------------------------------------------------
+
+ template<typename Alloc>
+-void bvector<Alloc>::calc_stat(struct bvector<Alloc>::statistics* st) const
++void bvector<Alloc>::calc_stat(
++ struct bvector<Alloc>::statistics* st) const BMNOEXCEPT
+ {
+ BM_ASSERT(st);
+
+@@ -3572,6 +3434,9 @@
+ if (!found)
+ break;
+ blk_blk = blk_root[i];
++ BM_ASSERT(blk_blk);
++ if (!blk_blk)
++ break;
+ }
+ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
+ continue;
+@@ -3636,8 +3501,7 @@
+
+ if (block_type) // gap block
+ {
+- bm::gap_word_t* gap_blk = BMGAP_PTR(blk);
+- gap_block_set(gap_blk, val, nblock, nbit);
++ this->gap_block_set_no_ret(BMGAP_PTR(blk), val, nblock, nbit);
+ }
+ else // bit block
+ {
+@@ -3798,7 +3662,10 @@
+ block_idx_type nblock_end = (ids[size_in-1] >> bm::set_block_shift);
+ if (nblock == nblock_end) // special case: one block import
+ {
+- import_block(ids, nblock, 0, stop);
++ if (stop == 1)
++ set_bit_no_check(ids[0]);
++ else
++ import_block(ids, nblock, 0, stop);
+ return;
+ }
+ }
+@@ -3817,7 +3684,11 @@
+ stop = bm::idx_arr_block_lookup_u32(ids, size_in, nblock, start);
+ #endif
+ BM_ASSERT(start < stop);
+- import_block(ids, nblock, start, stop);
++
++ if (stop - start == 1 && n < bm::id_max) // just one bit to set
++ set_bit_no_check(n);
++ else
++ import_block(ids, nblock, start, stop);
+ start = stop;
+ } while (start < size_in);
+ }
+@@ -3826,17 +3697,22 @@
+
+ template<class Alloc>
+ void bvector<Alloc>::import_block(const size_type* ids,
+- block_idx_type nblock,
+- size_type start, size_type stop)
++ block_idx_type nblock,
++ size_type start,
++ size_type stop)
+ {
++ BM_ASSERT(stop > start);
+ int block_type;
+ bm::word_t* blk =
+- blockman_.check_allocate_block(nblock, 1, 0, &block_type, true/*allow NULL ret*/);
++ blockman_.check_allocate_block(nblock, 1, 0, &block_type,
++ true/*allow NULL ret*/);
+ if (!IS_FULL_BLOCK(blk))
+ {
++ // TODO: add a special case when we import just a few bits per block
+ if (BM_IS_GAP(blk))
++ {
+ blk = blockman_.deoptimize_block(nblock); // TODO: try to avoid
+-
++ }
+ #ifdef BM64ADDR
+ bm::set_block_bits_u64(blk, ids, start, stop);
+ #else
+@@ -3867,40 +3743,32 @@
+ return false;
+
+ // calculate word number in block and bit
+- unsigned nbit = unsigned(n & bm::set_block_mask);
+-
++ unsigned nbit = unsigned(n & bm::set_block_mask);
+ if (block_type) // gap
+ {
+- bm::gap_word_t* gap_blk = BMGAP_PTR(blk);
+- unsigned is_set = gap_block_set(gap_blk, val, nblock, nbit);
+- return is_set;
++ return gap_block_set(BMGAP_PTR(blk), val, nblock, nbit);
+ }
+ else // bit block
+ {
+ unsigned nword = unsigned(nbit >> bm::set_word_shift);
+ nbit &= bm::set_word_mask;
+-
+ bm::word_t* word = blk + nword;
+ bm::word_t mask = (((bm::word_t)1) << nbit);
+
+ if (val)
+ {
+- if ( ((*word) & mask) == 0 )
+- {
+- *word |= mask; // set bit
+- return true;
+- }
++ val = ~(*word & mask);
++ *word |= mask; // set bit
++ return val;
+ }
+ else
+ {
+- if ((*word) & mask)
+- {
+- *word &= ~mask; // clear bit
+- return true;
+- }
++ val = ~(*word & mask);
++ *word &= ~mask; // clear bit
++ return val;
+ }
+ }
+- return false;
++ //return false;
+ }
+
+ // -----------------------------------------------------------------------
+@@ -3907,18 +3775,17 @@
+
+ template<class Alloc>
+ bool bvector<Alloc>::gap_block_set(bm::gap_word_t* gap_blk,
+- bool val, block_idx_type nblock, unsigned nbit)
++ bool val, block_idx_type nblock,
++ unsigned nbit)
+ {
+- unsigned is_set, new_block_len;
+- new_block_len =
+- bm::gap_set_value(val, gap_blk, nbit, &is_set);
+- if (is_set)
++ unsigned is_set, new_len, old_len;
++ old_len = bm::gap_length(gap_blk)-1;
++ new_len = bm::gap_set_value(val, gap_blk, nbit, &is_set);
++ if (old_len < new_len)
+ {
+ unsigned threshold = bm::gap_limit(gap_blk, blockman_.glen());
+- if (new_block_len > threshold)
+- {
++ if (new_len > threshold)
+ blockman_.extend_gap_block(nblock, gap_blk);
+- }
+ }
+ return is_set;
+ }
+@@ -3926,6 +3793,24 @@
+ // -----------------------------------------------------------------------
+
+ template<class Alloc>
++void bvector<Alloc>::gap_block_set_no_ret(bm::gap_word_t* gap_blk,
++ bool val, block_idx_type nblock, unsigned nbit)
++{
++ unsigned new_len, old_len;
++ old_len = bm::gap_length(gap_blk)-1;
++ new_len = bm::gap_set_value(val, gap_blk, nbit);
++ if (old_len < new_len)
++ {
++ unsigned threshold = bm::gap_limit(gap_blk, blockman_.glen());
++ if (new_len > threshold)
++ blockman_.extend_gap_block(nblock, gap_blk);
++ }
++}
++
++
++// -----------------------------------------------------------------------
++
++template<class Alloc>
+ bool bvector<Alloc>::inc(size_type n)
+ {
+ // calculate logical block number
+@@ -4089,11 +3974,11 @@
+ //---------------------------------------------------------------------
+
+ template<class Alloc>
+-bool bvector<Alloc>::find(size_type from, size_type& pos) const
++bool bvector<Alloc>::find(size_type from, size_type& pos) const BMNOEXCEPT
+ {
+- BM_ASSERT_THROW(from < bm::id_max, BM_ERR_RANGE);
+-
+- if (from == 0)
++ if (from == bm::id_max)
++ return false;
++ if (!from)
+ {
+ return find(pos);
+ }
+@@ -4104,7 +3989,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Alloc>
+-bool bvector<Alloc>::find_reverse(size_type& pos) const
++bool bvector<Alloc>::find_reverse(size_type& pos) const BMNOEXCEPT
+ {
+ bool found;
+
+@@ -4138,7 +4023,9 @@
+ }
+ if (found)
+ {
+- block_idx_type base_idx = block_idx_type(i) * bm::set_sub_array_size * bm::gap_max_bits;
++ block_idx_type base_idx =
++ block_idx_type(i) * bm::set_sub_array_size *
++ bm::gap_max_bits;
+ base_idx += j * bm::gap_max_bits;
+ pos = base_idx + block_pos;
+ return found;
+@@ -4159,7 +4046,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Alloc>
+-bool bvector<Alloc>::find(size_type& pos) const
++bool bvector<Alloc>::find(size_type& pos) const BMNOEXCEPT
+ {
+ bool found;
+
+@@ -4205,7 +4092,8 @@
+ //---------------------------------------------------------------------
+
+ template<class Alloc>
+-bool bvector<Alloc>::find_range(size_type& in_first, size_type& in_last) const
++bool bvector<Alloc>::find_range(size_type& in_first,
++ size_type& in_last) const BMNOEXCEPT
+ {
+ bool found = find(in_first);
+ if (found)
+@@ -4226,7 +4114,7 @@
+ template<class Alloc>
+ bool bvector<Alloc>::find_rank(size_type rank_in,
+ size_type from,
+- size_type& pos) const
++ size_type& pos) const BMNOEXCEPT
+ {
+ BM_ASSERT_THROW(from < bm::id_max, BM_ERR_RANGE);
+
+@@ -4280,7 +4168,7 @@
+ bool bvector<Alloc>::find_rank(size_type rank_in,
+ size_type from,
+ size_type& pos,
+- const rs_index_type& rs_idx) const
++ const rs_index_type& rs_idx) const BMNOEXCEPT
+ {
+ BM_ASSERT_THROW(from < bm::id_max, BM_ERR_RANGE);
+
+@@ -4349,7 +4237,7 @@
+
+ template<class Alloc>
+ bool bvector<Alloc>::select(size_type rank_in, size_type& pos,
+- const rs_index_type& rs_idx) const
++ const rs_index_type& rs_idx) const BMNOEXCEPT
+ {
+ bool ret = false;
+
+@@ -4385,7 +4273,7 @@
+
+ template<class Alloc>
+ typename bvector<Alloc>::size_type
+-bvector<Alloc>::check_or_next(size_type prev) const
++bvector<Alloc>::check_or_next(size_type prev) const BMNOEXCEPT
+ {
+ if (!blockman_.is_init())
+ return 0;
+@@ -4836,7 +4724,7 @@
+ //---------------------------------------------------------------------
+
+ template<class Alloc>
+-bool bvector<Alloc>::test_first_block_bit(block_idx_type nb) const
++bool bvector<Alloc>::test_first_block_bit(block_idx_type nb) const BMNOEXCEPT
+ {
+ if (nb >= bm::set_total_blocks) // last possible block
+ return false;
+@@ -6529,7 +6417,10 @@
+ BM_ASSERT(gfunc);
+ (*gfunc)(blk, BMGAP_PTR(arg_blk));
+
+- blockman_.optimize_bit_block(nb);
++ // TODO: commented out optimization, because it can be very slow
++ // need to take into account previous operation not to make
++ // fruitless attempts here
++ //blockman_.optimize_bit_block(nb);
+ return;
+ }
+
+@@ -6860,9 +6751,470 @@
+ }
+
+ //---------------------------------------------------------------------
++//
++//---------------------------------------------------------------------
+
++template<class Alloc>
++bool bvector<Alloc>::enumerator::go_up() BMNOEXCEPT
++{
++ BM_ASSERT(this->valid());
+
++ block_descr_type* bdescr = &(this->bdescr_);
++ if (this->block_type_) // GAP
++ {
++ BM_ASSERT(this->block_type_ == 1);
++ ++this->position_;
++ if (--(bdescr->gap_.gap_len))
++ return true;
++ // next gap is "OFF" by definition.
++ if (*(bdescr->gap_.ptr) != bm::gap_max_bits - 1)
++ {
++ gap_word_t prev = *(bdescr->gap_.ptr);
++ unsigned val = *(++(bdescr->gap_.ptr));
++ this->position_ += val - prev;
++ // next gap is now "ON"
++ if (*(bdescr->gap_.ptr) != bm::gap_max_bits - 1)
++ {
++ prev = *(bdescr->gap_.ptr);
++ val = *(++(bdescr->gap_.ptr));
++ bdescr->gap_.gap_len = (gap_word_t)(val - prev);
++ return true; // next "ON" found;
++ }
++ }
++ }
++ else // BIT
++ {
++ unsigned short idx = ++(bdescr->bit_.idx);
++ if (idx < bdescr->bit_.cnt)
++ {
++ this->position_ = bdescr->bit_.pos + bdescr->bit_.bits[idx];
++ return true;
++ }
++ this->position_ +=
++ (bm::set_bitscan_wave_size * 32) - bdescr->bit_.bits[--idx];
++ bdescr->bit_.ptr += bm::set_bitscan_wave_size;
++ if (decode_bit_group(bdescr))
++ return true;
++ }
+
++ if (search_in_blocks())
++ return true;
++
++ this->invalidate();
++ return false;
++}
++
++//---------------------------------------------------------------------
++
++
++template<class Alloc>
++bool bvector<Alloc>::enumerator::skip(size_type rank) BMNOEXCEPT
++{
++ if (!this->valid())
++ return false;
++ if (!rank)
++ return this->valid(); // nothing to do
++
++ for (; rank; --rank)
++ {
++ block_descr_type* bdescr = &(this->bdescr_);
++ switch (this->block_type_)
++ {
++ case 0: // BitBlock
++ for (; rank; --rank)
++ {
++ unsigned short idx = ++(bdescr->bit_.idx);
++ if (idx < bdescr->bit_.cnt)
++ {
++ this->position_ = bdescr->bit_.pos + bdescr->bit_.bits[idx];
++ continue;
++ }
++ this->position_ +=
++ (bm::set_bitscan_wave_size * 32) - bdescr->bit_.bits[--idx];
++ bdescr->bit_.ptr += bm::set_bitscan_wave_size;
++
++ if (!decode_bit_group(bdescr, rank))
++ break;
++ } // for rank
++ break;
++ case 1: // DGAP Block
++ for (; rank; --rank) // TODO: better skip logic
++ {
++ ++this->position_;
++ if (--(bdescr->gap_.gap_len))
++ continue;
++
++ // next gap is "OFF" by definition.
++ if (*(bdescr->gap_.ptr) == bm::gap_max_bits - 1)
++ break;
++ gap_word_t prev = *(bdescr->gap_.ptr);
++ unsigned int val = *(++(bdescr->gap_.ptr));
++
++ this->position_ += val - prev;
++ // next gap is now "ON"
++ if (*(bdescr->gap_.ptr) == bm::gap_max_bits - 1)
++ break;
++ prev = *(bdescr->gap_.ptr);
++ val = *(++(bdescr->gap_.ptr));
++ bdescr->gap_.gap_len = (gap_word_t)(val - prev);
++ } // for rank
++ break;
++ default:
++ BM_ASSERT(0);
++ } // switch
++
++ if (!rank)
++ return true;
++
++ if (!search_in_blocks())
++ {
++ this->invalidate();
++ return false;
++ }
++ } // for rank
++
++ return this->valid();
++}
++
++
++//---------------------------------------------------------------------
++
++
++template<class Alloc>
++bool bvector<Alloc>::enumerator::go_to(size_type pos) BMNOEXCEPT
++{
++ if (pos == 0)
++ {
++ go_first();
++ return this->valid();
++ }
++
++ size_type new_pos = this->bv_->check_or_next(pos); // find the true pos
++ if (!new_pos) // no bits available
++ {
++ this->invalidate();
++ return false;
++ }
++ BM_ASSERT(new_pos >= pos);
++ pos = new_pos;
++
++
++ this->position_ = pos;
++ size_type nb = this->block_idx_ = (pos >> bm::set_block_shift);
++ bm::bvector<Alloc>::blocks_manager_type& bman =
++ this->bv_->get_blocks_manager();
++ unsigned i0, j0;
++ bm::get_block_coord(nb, i0, j0);
++ this->block_ = bman.get_block(i0, j0);
++
++ BM_ASSERT(this->block_);
++
++ this->block_type_ = (bool)BM_IS_GAP(this->block_);
++
++ block_descr_type* bdescr = &(this->bdescr_);
++ unsigned nbit = unsigned(pos & bm::set_block_mask);
++
++ if (this->block_type_) // gap
++ {
++ this->position_ = nb * bm::set_block_size * 32;
++ search_in_gapblock();
++
++ if (this->position_ == pos)
++ return this->valid();
++ this->position_ = pos;
++
++ gap_word_t* gptr = BMGAP_PTR(this->block_);
++ unsigned is_set;
++ unsigned gpos = bm::gap_bfind(gptr, nbit, &is_set);
++ BM_ASSERT(is_set);
++
++ bdescr->gap_.ptr = gptr + gpos;
++ if (gpos == 1)
++ {
++ bdescr->gap_.gap_len = bm::gap_word_t(gptr[gpos] - (nbit - 1));
++ }
++ else
++ {
++ bm::gap_word_t interval = bm::gap_word_t(gptr[gpos] - gptr[gpos - 1]);
++ bm::gap_word_t interval2 = bm::gap_word_t(nbit - gptr[gpos - 1]);
++ bdescr->gap_.gap_len = bm::gap_word_t(interval - interval2 + 1);
++ }
++ }
++ else // bit
++ {
++ if (nbit == 0)
++ {
++ search_in_bitblock();
++ return this->valid();
++ }
++
++ unsigned nword = unsigned(nbit >> bm::set_word_shift);
++
++ // check if we need to step back to match the wave
++ unsigned parity = nword % bm::set_bitscan_wave_size;
++ bdescr->bit_.ptr = this->block_ + (nword - parity);
++ bdescr->bit_.cnt = bm::bitscan_wave(bdescr->bit_.ptr, bdescr->bit_.bits);
++ BM_ASSERT(bdescr->bit_.cnt);
++ bdescr->bit_.pos = (nb * bm::set_block_size * 32) + ((nword - parity) * 32);
++ bdescr->bit_.idx = 0;
++ nbit &= bm::set_word_mask;
++ nbit += 32 * parity;
++ for (unsigned i = 0; i < bdescr->bit_.cnt; ++i)
++ {
++ if (bdescr->bit_.bits[i] == nbit)
++ return this->valid();
++ bdescr->bit_.idx++;
++ } // for
++ BM_ASSERT(0);
++ }
++ return this->valid();
++}
++
++//---------------------------------------------------------------------
++
++template<class Alloc>
++void bvector<Alloc>::enumerator::go_first() BMNOEXCEPT
++{
++ BM_ASSERT(this->bv_);
++
++ blocks_manager_type* bman = &(this->bv_->blockman_);
++ if (!bman->is_init())
++ {
++ this->invalidate();
++ return;
++ }
++
++ bm::word_t*** blk_root = bman->top_blocks_root();
++ this->block_idx_ = this->position_= 0;
++ unsigned i, j;
++
++ for (i = 0; i < bman->top_block_size(); ++i)
++ {
++ bm::word_t** blk_blk = blk_root[i];
++ if (blk_blk == 0) // not allocated
++ {
++ this->block_idx_ += bm::set_sub_array_size;
++ this->position_ += bm::bits_in_array;
++ continue;
++ }
++
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ blk_blk = FULL_SUB_BLOCK_REAL_ADDR;
++
++ for (j = 0; j < bm::set_sub_array_size; ++j,++(this->block_idx_))
++ {
++ this->block_ = blk_blk[j];
++ if (this->block_ == 0)
++ {
++ this->position_ += bits_in_block;
++ continue;
++ }
++ if (BM_IS_GAP(this->block_))
++ {
++ this->block_type_ = 1;
++ if (search_in_gapblock())
++ return;
++ }
++ else
++ {
++ if (this->block_ == FULL_BLOCK_FAKE_ADDR)
++ this->block_ = FULL_BLOCK_REAL_ADDR;
++ this->block_type_ = 0;
++ if (search_in_bitblock())
++ return;
++ }
++ } // for j
++ } // for i
++
++ this->invalidate();
++}
++
++//---------------------------------------------------------------------
++
++template<class Alloc>
++bool
++bvector<Alloc>::enumerator::decode_wave(block_descr_type* bdescr) BMNOEXCEPT
++{
++ bdescr->bit_.cnt = bm::bitscan_wave(bdescr->bit_.ptr, bdescr->bit_.bits);
++ if (bdescr->bit_.cnt) // found
++ {
++ bdescr->bit_.idx = 0;
++ return true;
++ }
++ return false;
++}
++
++//---------------------------------------------------------------------
++
++template<class Alloc>
++bool
++bvector<Alloc>::enumerator::decode_bit_group(block_descr_type* bdescr) BMNOEXCEPT
++{
++ const word_t* block_end = this->block_ + bm::set_block_size;
++ for (; bdescr->bit_.ptr < block_end;)
++ {
++ if (decode_wave(bdescr))
++ {
++ bdescr->bit_.pos = this->position_;
++ this->position_ += bdescr->bit_.bits[0];
++ return true;
++ }
++ this->position_ += bm::set_bitscan_wave_size * 32; // wave size
++ bdescr->bit_.ptr += bm::set_bitscan_wave_size;
++ } // for
++ return false;
++}
++
++//---------------------------------------------------------------------
++
++template<class Alloc>
++bool
++bvector<Alloc>::enumerator::decode_bit_group(block_descr_type* bdescr,
++ size_type& rank) BMNOEXCEPT
++{
++ const word_t* block_end = this->block_ + bm::set_block_size;
++ for (; bdescr->bit_.ptr < block_end;)
++ {
++ const bm::id64_t* w64_p = (bm::id64_t*)bdescr->bit_.ptr;
++ BM_ASSERT(bm::set_bitscan_wave_size == 4); // TODO: better handle this
++
++ unsigned cnt = bm::word_bitcount64(w64_p[0]);
++ cnt += bm::word_bitcount64(w64_p[1]);
++ if (rank > cnt)
++ {
++ rank -= cnt;
++ }
++ else
++ {
++ if (decode_wave(bdescr))
++ {
++ bdescr->bit_.pos = this->position_;
++ this->position_ += bdescr->bit_.bits[0];
++ return true;
++ }
++ }
++ this->position_ += bm::set_bitscan_wave_size * 32; // wave size
++ bdescr->bit_.ptr += bm::set_bitscan_wave_size;
++ } // for
++ return false;
++}
++
++//---------------------------------------------------------------------
++
++template<class Alloc>
++bool bvector<Alloc>::enumerator::search_in_bitblock() BMNOEXCEPT
++{
++ BM_ASSERT(this->block_type_ == 0);
++
++ block_descr_type* bdescr = &(this->bdescr_);
++ bdescr->bit_.ptr = this->block_;
++ return decode_bit_group(bdescr);
++}
++
++//---------------------------------------------------------------------
++
++template<class Alloc>
++bool bvector<Alloc>::enumerator::search_in_gapblock() BMNOEXCEPT
++{
++ BM_ASSERT(this->block_type_ == 1);
++
++ block_descr_type* bdescr = &(this->bdescr_);
++ bdescr->gap_.ptr = BMGAP_PTR(this->block_);
++ unsigned bitval = *(bdescr->gap_.ptr) & 1;
++
++ ++(bdescr->gap_.ptr);
++
++ for (;true;)
++ {
++ unsigned val = *(bdescr->gap_.ptr);
++ if (bitval)
++ {
++ gap_word_t* first = BMGAP_PTR(this->block_) + 1;
++ if (bdescr->gap_.ptr == first)
++ {
++ bdescr->gap_.gap_len = (gap_word_t)(val + 1);
++ }
++ else
++ {
++ bdescr->gap_.gap_len =
++ (gap_word_t)(val - *(bdescr->gap_.ptr-1));
++ }
++ return true;
++ }
++ this->position_ += val + 1;
++ if (val == bm::gap_max_bits - 1)
++ break;
++ bitval ^= 1;
++ ++(bdescr->gap_.ptr);
++ }
++ return false;
++}
++
++//---------------------------------------------------------------------
++
++template<class Alloc>
++bool bvector<Alloc>::enumerator::search_in_blocks() BMNOEXCEPT
++{
++ ++(this->block_idx_);
++ const blocks_manager_type& bman = this->bv_->blockman_;
++ block_idx_type i = this->block_idx_ >> bm::set_array_shift;
++ block_idx_type top_block_size = bman.top_block_size();
++ bm::word_t*** blk_root = bman.top_blocks_root();
++ for (; i < top_block_size; ++i)
++ {
++ bm::word_t** blk_blk = blk_root[i];
++ if (blk_blk == 0)
++ {
++ // fast scan fwd in top level
++ size_type bn = this->block_idx_ + bm::set_sub_array_size;
++ size_type pos = this->position_ + bm::bits_in_array;
++ for (++i; i < top_block_size; ++i)
++ {
++ if (blk_root[i])
++ break;
++ bn += bm::set_sub_array_size;
++ pos += bm::bits_in_array;
++ } // for i
++ this->block_idx_ = bn;
++ this->position_ = pos;
++ if ((i < top_block_size) && blk_root[i])
++ --i;
++ continue;
++ }
++ if ((bm::word_t*)blk_blk == FULL_BLOCK_FAKE_ADDR)
++ blk_blk = FULL_SUB_BLOCK_REAL_ADDR;
++
++ block_idx_type j = this->block_idx_ & bm::set_array_mask;
++
++ for(; j < bm::set_sub_array_size; ++j, ++(this->block_idx_))
++ {
++ this->block_ = blk_blk[j];
++ if (this->block_ == 0)
++ {
++ this->position_ += bm::bits_in_block;
++ continue;
++ }
++ this->block_type_ = BM_IS_GAP(this->block_);
++ if (this->block_type_)
++ {
++ if (search_in_gapblock())
++ return true;
++ }
++ else
++ {
++ if (this->block_ == FULL_BLOCK_FAKE_ADDR)
++ this->block_ = FULL_BLOCK_REAL_ADDR;
++ if (search_in_bitblock())
++ return true;
++ }
++ } // for j
++ } // for i
++ return false;
++}
++
++//---------------------------------------------------------------------
++
++
+ } // namespace
+
+ #include "bmundef.h"
+Index: c++/include/util/bitset/bmgamma.h
+===================================================================
+--- a/c++/include/util/bitset/bmgamma.h (revision 90103)
++++ b/c++/include/util/bitset/bmgamma.h (revision 90104)
+@@ -42,25 +42,25 @@
+ class gamma_decoder
+ {
+ public:
+- gamma_decoder(TBitIO& bin) : bin_(bin)
++ gamma_decoder(TBitIO& bin) BMNOEXEPT : bin_(bin)
+ {}
+
+ /**
+ Start encoding sequence
+ */
+- void start()
++ void start() BMNOEXEPT
+ {}
+
+ /**
+ Stop decoding sequence
+ */
+- void stop()
++ void stop() BMNOEXEPT
+ {}
+
+ /**
+ Decode word
+ */
+- T operator()(void)
++ T operator()(void) BMNOEXEPT
+ {
+ unsigned l = bin_.eat_zero_bits();
+ bin_.get_bit(); // get border bit
+Index: c++/include/util/bitset/bmaggregator.h
+===================================================================
+--- a/c++/include/util/bitset/bmaggregator.h (revision 90103)
++++ b/c++/include/util/bitset/bmaggregator.h (revision 90104)
+@@ -89,6 +89,7 @@
+
+ public:
+
++ // -----------------------------------------------------------------------
+ /*! @name Construction and setup */
+ //@{
+ aggregator();
+@@ -105,6 +106,12 @@
+ void set_optimization(
+ typename bvector_type::optmode opt = bvector_type::opt_compress)
+ { opt_mode_ = opt; }
++
++ void set_compute_count(bool count_mode)
++ {
++ compute_count_ = count_mode; count_ = 0;
++ }
++
+ //@}
+
+
+@@ -122,12 +129,12 @@
+ @return current arg group size (0 if vector was not added (empty))
+ @sa reset
+ */
+- unsigned add(const bvector_type* bv, unsigned agr_group = 0);
++ unsigned add(const bvector_type* bv, unsigned agr_group = 0) BMNOEXCEPT;
+
+ /**
+ Reset aggregate groups, forget all attached vectors
+ */
+- void reset();
++ void reset() BMNOEXCEPT;
+
+ /**
+ Aggregate added group of vectors using logical OR
+@@ -195,7 +202,9 @@
+ Set search hint for the range, where results needs to be searched
+ (experimental for internal use).
+ */
+- void set_range_hint(size_type from, size_type to);
++ void set_range_hint(size_type from, size_type to) BMNOEXCEPT;
++
++ size_type count() const { return count_; }
+
+ //@}
+
+@@ -305,10 +314,10 @@
+ //@{
+
+ /** Get current operation code */
+- int get_operation() const { return operation_; }
++ int get_operation() const BMNOEXCEPT { return operation_; }
+
+ /** Set operation code for the aggregator */
+- void set_operation(int op_code) { operation_ = op_code; }
++ void set_operation(int op_code) BMNOEXCEPT { operation_ = op_code; }
+
+ /**
+ Prepare operation, create internal resources, analyse dependencies.
+@@ -361,19 +370,20 @@
+ bool init_clear = true);
+
+ static
+- unsigned max_top_blocks(const bvector_type_const_ptr* bv_src, unsigned src_size);
++ unsigned max_top_blocks(const bvector_type_const_ptr* bv_src,
++ unsigned src_size) BMNOEXCEPT;
+
+ bm::word_t* sort_input_blocks_or(const bvector_type_const_ptr* bv_src,
+ unsigned src_size,
+ unsigned i, unsigned j,
+ unsigned* arg_blk_count,
+- unsigned* arg_blk_gap_count);
++ unsigned* arg_blk_gap_count) BMNOEXCEPT;
+
+ bm::word_t* sort_input_blocks_and(const bvector_type_const_ptr* bv_src,
+ unsigned src_size,
+ unsigned i, unsigned j,
+ unsigned* arg_blk_count,
+- unsigned* arg_blk_gap_count);
++ unsigned* arg_blk_gap_count) BMNOEXCEPT;
+
+
+ bool process_bit_blocks_or(blocks_manager_type& bman_target,
+@@ -396,19 +406,24 @@
+ unsigned find_effective_sub_block_size(unsigned i,
+ const bvector_type_const_ptr* bv_src,
+ unsigned src_size,
+- bool top_null_as_zero);
++ bool top_null_as_zero) BMNOEXCEPT;
++
++ static
++ bool any_carry_overs(const unsigned char* carry_overs,
++ unsigned co_size) BMNOEXCEPT;
+
+- bool any_carry_overs(unsigned co_size) const;
+-
+ /**
+ @return carry over
+ */
+- bool process_shift_right_and(const bm::word_t* arg_blk,
+- digest_type& digest,
+- unsigned carry_over);
+-
++ static
++ unsigned process_shift_right_and(bm::word_t* BMRESTRICT blk,
++ const bm::word_t* BMRESTRICT arg_blk,
++ digest_type& BMRESTRICT digest,
++ unsigned carry_over) BMNOEXCEPT;
++
++ static
+ const bm::word_t* get_arg_block(const bvector_type_const_ptr* bv_src,
+- unsigned k, unsigned i, unsigned j);
++ unsigned k, unsigned i, unsigned j) BMNOEXCEPT;
+
+ bvector_type* check_create_target();
+
+@@ -418,8 +433,8 @@
+ /// @internal
+ struct arena
+ {
+- BM_DECLARE_TEMP_BLOCK(tb1);
+- BM_DECLARE_TEMP_BLOCK(tb_opt); ///< temp block for results optimization
++ BM_DECLARE_TEMP_BLOCK(tb1)
++ BM_DECLARE_TEMP_BLOCK(tb_opt) ///< temp block for results optimization
+ const bm::word_t* v_arg_or_blk[max_aggregator_cap]; ///< source blocks list (OR)
+ const bm::gap_word_t* v_arg_or_blk_gap[max_aggregator_cap]; ///< source GAP blocks list (OR)
+ const bm::word_t* v_arg_and_blk[max_aggregator_cap]; ///< source blocks list (AND)
+@@ -450,8 +465,9 @@
+ size_type range_from_ = bm::id_max; ///< search from
+ size_type range_to_ = bm::id_max; ///< search to
+
+- typename bvector_type::optmode opt_mode_;
+-
++ typename bvector_type::optmode opt_mode_; ///< perform search result optimization
++ bool compute_count_; ///< compute search result count
++ size_type count_; ///< search result count
+ };
+
+
+@@ -515,7 +531,9 @@
+
+ template<typename BV>
+ aggregator<BV>::aggregator()
+-: opt_mode_(bvector_type::opt_none)
++: opt_mode_(bvector_type::opt_none),
++ compute_count_(false),
++ count_(0)
+ {
+ ar_ = (arena*) bm::aligned_new_malloc(sizeof(arena));
+ }
+@@ -533,18 +551,19 @@
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-void aggregator<BV>::reset()
++void aggregator<BV>::reset() BMNOEXCEPT
+ {
+ arg_group0_size = arg_group1_size = operation_ = top_block_size_ = 0;
+ operation_status_ = op_undefined;
+ range_set_ = false;
+ range_from_ = range_to_ = bm::id_max;
++ count_ = 0;
+ }
+
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-void aggregator<BV>::set_range_hint(size_type from, size_type to)
++void aggregator<BV>::set_range_hint(size_type from, size_type to) BMNOEXCEPT
+ {
+ range_from_ = from; range_to_ = to;
+ range_set_ = true;
+@@ -553,11 +572,12 @@
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-typename aggregator<BV>::bvector_type* aggregator<BV>::check_create_target()
++typename aggregator<BV>::bvector_type*
++aggregator<BV>::check_create_target()
+ {
+ if (!bv_target_)
+ {
+- bv_target_ = new bvector_type();
++ bv_target_ = new bvector_type(); //TODO: get rid of "new"
+ bv_target_->init();
+ }
+ return bv_target_;
+@@ -566,7 +586,8 @@
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-unsigned aggregator<BV>::add(const bvector_type* bv, unsigned agr_group)
++unsigned aggregator<BV>::add(const bvector_type* bv,
++ unsigned agr_group) BMNOEXCEPT
+ {
+ BM_ASSERT_THROW(agr_group <= 1, BM_ERR_RANGE);
+ BM_ASSERT(agr_group <= 1);
+@@ -646,6 +667,7 @@
+ template<typename BV>
+ void aggregator<BV>::combine_shift_right_and(bvector_type& bv_target)
+ {
++ count_ = 0;
+ combine_shift_right_and(bv_target, ar_->arg_bv0, arg_group0_size, false);
+ }
+
+@@ -890,10 +912,11 @@
+
+ template<typename BV>
+ unsigned
+-aggregator<BV>::find_effective_sub_block_size(unsigned i,
+- const bvector_type_const_ptr* bv_src,
+- unsigned src_size,
+- bool top_null_as_zero)
++aggregator<BV>::find_effective_sub_block_size(
++ unsigned i,
++ const bvector_type_const_ptr* bv_src,
++ unsigned src_size,
++ bool top_null_as_zero) BMNOEXCEPT
+ {
+ // quick hack to avoid scanning large, arrays, where such scan can be quite
+ // expensive by itself (this makes this function approximate)
+@@ -924,7 +947,7 @@
+ max_size = j;
+ break;
+ }
+- }
++ } // for j
+ if (max_size == bm::set_sub_array_size - 1)
+ break;
+ } // for k
+@@ -992,8 +1015,6 @@
+ {
+ BM_ASSERT(src_size);
+
+- typename bvector_type::blocks_manager_type& bman_target = bv_target.get_blocks_manager();
+-
+ unsigned arg_blk_count = 0;
+ unsigned arg_blk_gap_count = 0;
+ bm::word_t* blk =
+@@ -1012,12 +1033,11 @@
+ if (ar_->v_arg_and_blk[0] == FULL_BLOCK_REAL_ADDR)
+ {
+ // another nothing to do: one FULL block
++ blocks_manager_type& bman_target = bv_target.get_blocks_manager();
+ bman_target.check_alloc_top_subblock(i);
+ bman_target.set_block_ptr(i, j, blk);
+ if (++j == bm::set_sub_array_size)
+- {
+ bman_target.validate_top_full(i);
+- }
+ return;
+ }
+ }
+@@ -1032,14 +1052,13 @@
+ //
+ if (arg_blk_gap_count)
+ {
+- digest =
+- process_gap_blocks_and(arg_blk_gap_count, digest);
++ digest = process_gap_blocks_and(arg_blk_gap_count, digest);
+ }
+- if (digest) // some results
++ if (digest) // we have results , allocate block and copy from temp
+ {
+- // we have some results, allocate block and copy from temp
++ blocks_manager_type& bman_target = bv_target.get_blocks_manager();
+ bman_target.opt_copy_bit_block(i, j, ar_->tb1,
+- opt_mode_, ar_->tb_opt);
++ opt_mode_, ar_->tb_opt);
+ }
+ }
+ }
+@@ -1154,7 +1173,7 @@
+ bool b = bm::gap_test_unr(ar_->v_arg_and_blk_gap[k], single_bit_idx);
+ if (!b)
+ return 0; // AND 0 causes result to turn 0
+- }
++ } // for k
+ break;
+ }
+ }
+@@ -1471,7 +1490,8 @@
+
+ template<typename BV>
+ unsigned
+-aggregator<BV>::max_top_blocks(const bvector_type_const_ptr* bv_src, unsigned src_size)
++aggregator<BV>::max_top_blocks(const bvector_type_const_ptr* bv_src,
++ unsigned src_size) BMNOEXCEPT
+ {
+ unsigned top_blocks = 1;
+
+@@ -1491,11 +1511,12 @@
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-bm::word_t* aggregator<BV>::sort_input_blocks_or(const bvector_type_const_ptr* bv_src,
+- unsigned src_size,
+- unsigned i, unsigned j,
+- unsigned* arg_blk_count,
+- unsigned* arg_blk_gap_count)
++bm::word_t* aggregator<BV>::sort_input_blocks_or(
++ const bvector_type_const_ptr* bv_src,
++ unsigned src_size,
++ unsigned i, unsigned j,
++ unsigned* arg_blk_count,
++ unsigned* arg_blk_gap_count) BMNOEXCEPT
+ {
+ bm::word_t* blk = 0;
+ for (unsigned k = 0; k < src_size; ++k)
+@@ -1529,11 +1550,12 @@
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-bm::word_t* aggregator<BV>::sort_input_blocks_and(const bvector_type_const_ptr* bv_src,
+- unsigned src_size,
+- unsigned i, unsigned j,
+- unsigned* arg_blk_count,
+- unsigned* arg_blk_gap_count)
++bm::word_t* aggregator<BV>::sort_input_blocks_and(
++ const bvector_type_const_ptr* bv_src,
++ unsigned src_size,
++ unsigned i, unsigned j,
++ unsigned* arg_blk_count,
++ unsigned* arg_blk_gap_count) BMNOEXCEPT
+ {
+ unsigned full_blk_cnt = 0;
+ bm::word_t* blk = FULL_BLOCK_FAKE_ADDR;
+@@ -1683,7 +1705,7 @@
+ {
+ if (i > top_block_size_)
+ {
+- if (!this->any_carry_overs(src_and_size))
++ if (!any_carry_overs(&ar_->carry_overs_[0], src_and_size))
+ break; // quit early if there is nothing to carry on
+ }
+
+@@ -1690,7 +1712,8 @@
+ unsigned j = 0;
+ do
+ {
+- bool found = combine_shift_right_and(i, j, bv_target, bv_src_and, src_and_size);
++ bool found =
++ combine_shift_right_and(i, j, bv_target, bv_src_and, src_and_size);
+ if (found && any)
+ return found;
+ } while (++j < bm::set_sub_array_size);
+@@ -1697,6 +1720,9 @@
+
+ } // for i
+
++ if (compute_count_)
++ return bool(count_);
++
+ return bv_target.any();
+ }
+
+@@ -1708,7 +1734,6 @@
+ const bvector_type_const_ptr* bv_src,
+ unsigned src_size)
+ {
+- blocks_manager_type& bman_target = bv_target.get_blocks_manager();
+ bm::word_t* blk = temp_blk_ ? temp_blk_ : ar_->tb1;
+ unsigned char* carry_overs = &(ar_->carry_overs_[0]);
+
+@@ -1748,18 +1773,33 @@
+ if (blk_zero) // delayed temp block 0-init requested
+ {
+ bm::bit_block_set(blk, 0);
+- blk_zero = false;
++ blk_zero = !blk_zero; // = false
+ }
+ const bm::word_t* arg_blk = get_arg_block(bv_src, k, i, j);
+- carry_overs[k] = process_shift_right_and(arg_blk, digest, carry_over);
++ carry_overs[k] = (unsigned char)
++ process_shift_right_and(blk, arg_blk, digest, carry_over);
++ BM_ASSERT(carry_overs[k] == 0 || carry_overs[k] == 1);
+ } // for k
+-
++
++ if (blk_zero) // delayed temp block 0-init
++ {
++ bm::bit_block_set(blk, 0);
++ }
+ // block now gets emitted into the target bit-vector
+ if (digest)
+ {
+ BM_ASSERT(!bm::bit_is_all_zero(blk));
+- bman_target.opt_copy_bit_block(i, j, blk,
+- opt_mode_, ar_->tb_opt);
++
++ if (compute_count_)
++ {
++ unsigned cnt = bm::bit_block_count(blk, digest);
++ count_ += cnt;
++ }
++ else
++ {
++ blocks_manager_type& bman_target = bv_target.get_blocks_manager();
++ bman_target.opt_copy_bit_block(i, j, blk, opt_mode_, ar_->tb_opt);
++ }
+ return true;
+ }
+ return false;
+@@ -1768,11 +1808,13 @@
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-bool aggregator<BV>::process_shift_right_and(const bm::word_t* arg_blk,
+- digest_type& digest,
+- unsigned carry_over)
++unsigned aggregator<BV>::process_shift_right_and(
++ bm::word_t* BMRESTRICT blk,
++ const bm::word_t* BMRESTRICT arg_blk,
++ digest_type& BMRESTRICT digest,
++ unsigned carry_over) BMNOEXCEPT
+ {
+- bm::word_t* blk = temp_blk_ ? temp_blk_ : ar_->tb1;
++ BM_ASSERT(carry_over == 1 || carry_over == 0);
+
+ if (BM_IS_GAP(arg_blk)) // GAP argument
+ {
+@@ -1800,8 +1842,8 @@
+ if (digest)
+ {
+ carry_over =
+- bm::bit_block_shift_r1_and_unr(blk, carry_over, arg_blk,
+- &digest);
++ bm::bit_block_shift_r1_and_unr(blk, carry_over, arg_blk,
++ &digest);
+ }
+ else // digest == 0
+ {
+@@ -1813,13 +1855,12 @@
+ }
+ else // arg is zero - target block => zero
+ {
+- unsigned co = blk[bm::set_block_size-1] >> 31; // carry out
++ carry_over = blk[bm::set_block_size-1] >> 31; // carry out
+ if (digest)
+ {
+ bm::bit_block_set(blk, 0); // TODO: digest based set
+- digest ^= digest;
++ digest = 0;
+ }
+- carry_over = co;
+ }
+ }
+ return carry_over;
+@@ -1829,22 +1870,26 @@
+
+ template<typename BV>
+ const bm::word_t* aggregator<BV>::get_arg_block(
+- const bvector_type_const_ptr* bv_src,
+- unsigned k, unsigned i, unsigned j)
++ const bvector_type_const_ptr* bv_src,
++ unsigned k, unsigned i, unsigned j) BMNOEXCEPT
+ {
+- const blocks_manager_type& bman_arg = bv_src[k]->get_blocks_manager();
+- return bman_arg.get_block(i, j);
++ return bv_src[k]->get_blocks_manager().get_block(i, j);
+ }
+
+ // ------------------------------------------------------------------------
+
+ template<typename BV>
+-bool aggregator<BV>::any_carry_overs(unsigned co_size) const
++bool aggregator<BV>::any_carry_overs(const unsigned char* carry_overs,
++ unsigned co_size) BMNOEXCEPT
+ {
+- for (unsigned i = 0; i < co_size; ++i)
+- if (ar_->carry_overs_[i])
+- return true;
+- return false;
++ // TODO: loop unroll?
++ unsigned acc = carry_overs[0];
++ for (unsigned i = 1; i < co_size; ++i)
++ acc |= carry_overs[i];
++// if (ar_->carry_overs_[i])
++// return true;
++// return false;
++ return acc;
+ }
+
+ // ------------------------------------------------------------------------
+@@ -1888,7 +1933,7 @@
+ {
+ if (i > top_block_size_)
+ {
+- if (!this->any_carry_overs(arg_group0_size))
++ if (!this->any_carry_overs(&ar_->carry_overs_[0], arg_group0_size))
+ {
+ operation_status_ = op_done;
+ return operation_status_;
+Index: c++/include/util/bitset/bmstrsparsevec.h
+===================================================================
+--- a/c++/include/util/bitset/bmstrsparsevec.h (revision 90103)
++++ b/c++/include/util/bitset/bmstrsparsevec.h (revision 90104)
+@@ -95,19 +95,19 @@
+ {
+ public:
+ const_reference(const str_sparse_vector<CharType, BV, MAX_STR_SIZE>& str_sv,
+- size_type idx) BMNOEXEPT
++ size_type idx) BMNOEXCEPT
+ : str_sv_(str_sv), idx_(idx)
+ {}
+
+- operator const value_type*() const
++ operator const value_type*() const BMNOEXCEPT
+ {
+ str_sv_.get(idx_, buf_, MAX_STR_SIZE);
+ return &(buf_[0]);
+ }
+
+- bool operator==(const const_reference& ref) const
++ bool operator==(const const_reference& ref) const BMNOEXCEPT
+ { return bool(*this) == bool(ref); }
+- bool is_null() const { return str_sv_.is_null(idx_); }
++ bool is_null() const BMNOEXCEPT { return str_sv_.is_null(idx_); }
+ private:
+ const str_sparse_vector<CharType, BV, MAX_STR_SIZE>& str_sv_;
+ size_type idx_;
+@@ -122,11 +122,11 @@
+ {
+ public:
+ reference(str_sparse_vector<CharType, BV, MAX_STR_SIZE>& str_sv,
+- size_type idx) BMNOEXEPT
++ size_type idx) BMNOEXCEPT
+ : str_sv_(str_sv), idx_(idx)
+ {}
+
+- operator const value_type*() const
++ operator const value_type*() const BMNOEXCEPT
+ {
+ str_sv_.get(idx_, buf_, MAX_STR_SIZE);
+ return &(buf_[0]);
+@@ -144,9 +144,9 @@
+ str_sv_.set(idx_, str);
+ return *this;
+ }
+- bool operator==(const reference& ref) const
++ bool operator==(const reference& ref) const BMNOEXCEPT
+ { return bool(*this) == bool(ref); }
+- bool is_null() const { return str_sv_.is_null(idx_); }
++ bool is_null() const BMNOEXCEPT { return str_sv_.is_null(idx_); }
+ private:
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>& str_sv_;
+ size_type idx_;
+@@ -183,55 +183,56 @@
+ typedef CharType* pointer;
+ typedef CharType*& reference;
+ public:
+- const_iterator();
+- const_iterator(const str_sparse_vector_type* sv);
+- const_iterator(const str_sparse_vector_type* sv, size_type pos);
+- const_iterator(const const_iterator& it);
++ const_iterator() BMNOEXCEPT;
++ const_iterator(const str_sparse_vector_type* sv) BMNOEXCEPT;
++ const_iterator(const str_sparse_vector_type* sv, size_type pos) BMNOEXCEPT;
++ const_iterator(const const_iterator& it) BMNOEXCEPT;
+
+- bool operator==(const const_iterator& it) const
++ bool operator==(const const_iterator& it) const BMNOEXCEPT
+ { return (pos_ == it.pos_) && (sv_ == it.sv_); }
+- bool operator!=(const const_iterator& it) const
++ bool operator!=(const const_iterator& it) const BMNOEXCEPT
+ { return ! operator==(it); }
+- bool operator < (const const_iterator& it) const
++ bool operator < (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ < it.pos_; }
+- bool operator <= (const const_iterator& it) const
++ bool operator <= (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ <= it.pos_; }
+- bool operator > (const const_iterator& it) const
++ bool operator > (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ > it.pos_; }
+- bool operator >= (const const_iterator& it) const
++ bool operator >= (const const_iterator& it) const BMNOEXCEPT
+ { return pos_ >= it.pos_; }
+
+ /// \brief Get current position (value)
+- const value_type* operator*() const { return this->value(); }
++ const value_type* operator*() const BMNOEXCEPT { return this->value(); }
+
+ /// \brief Advance to the next available value
+- const_iterator& operator++() { this->advance(); return *this; }
++ const_iterator& operator++() BMNOEXCEPT
++ { this->advance(); return *this; }
+
+ /// \brief Advance to the next available value
+- const_iterator& operator++(int)
++ const_iterator& operator++(int) BMNOEXCEPT
+ { const_iterator tmp(*this);this->advance(); return tmp; }
+
+
+ /// \brief Get current position (value)
+- const value_type* value() const;
++ const value_type* value() const BMNOEXCEPT;
+
+ /// \brief Get NULL status
+- bool is_null() const { return sv_->is_null(this->pos_); }
++ bool is_null() const BMNOEXCEPT { return sv_->is_null(this->pos_); }
+
+ /// Returns true if iterator is at a valid position
+- bool valid() const { return pos_ != bm::id_max; }
++ bool valid() const BMNOEXCEPT { return pos_ != bm::id_max; }
+
+ /// Invalidate current iterator
+- void invalidate() { pos_ = bm::id_max; }
++ void invalidate() BMNOEXCEPT { pos_ = bm::id_max; }
+
+ /// Current position (index) in the vector
+- size_type pos() const { return pos_; }
++ size_type pos() const BMNOEXCEPT { return pos_; }
+
+ /// re-position to a specified position
+- void go_to(size_type pos);
++ void go_to(size_type pos) BMNOEXCEPT;
+
+ /// advance iterator forward by one
+- void advance();
++ void advance() BMNOEXCEPT;
+
+ protected:
+ typedef bm::heap_matrix<CharType,
+@@ -279,9 +280,9 @@
+ typedef void reference;
+
+ public:
+- back_insert_iterator();
+- back_insert_iterator(str_sparse_vector_type* sv);
+- back_insert_iterator(const back_insert_iterator& bi);
++ back_insert_iterator() BMNOEXCEPT;
++ back_insert_iterator(str_sparse_vector_type* sv) BMNOEXCEPT;
++ back_insert_iterator(const back_insert_iterator& bi) BMNOEXCEPT;
+
+ back_insert_iterator& operator=(const back_insert_iterator& bi)
+ {
+@@ -321,7 +322,7 @@
+ void add_null(size_type count);
+
+ /** return true if insertion buffer is empty */
+- bool empty() const;
++ bool empty() const BMNOEXCEPT;
+
+ /** flush the accumulated buffer */
+ void flush();
+@@ -393,7 +394,7 @@
+ }
+ #ifndef BM_NO_CXX11
+ /*! move-ctor */
+- str_sparse_vector(str_sparse_vector<CharType, BV, MAX_STR_SIZE>&& str_sv) BMNOEXEPT
++ str_sparse_vector(str_sparse_vector<CharType, BV, MAX_STR_SIZE>&& str_sv) BMNOEXCEPT
+ {
+ parent_type::swap(str_sv);
+ remap_flags_ = str_sv.remap_flags_;
+@@ -403,7 +404,7 @@
+
+ /*! move assignmment operator */
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>& operator =
+- (str_sparse_vector<CharType, BV, MAX_STR_SIZE>&& str_sv) BMNOEXEPT
++ (str_sparse_vector<CharType, BV, MAX_STR_SIZE>&& str_sv) BMNOEXCEPT
+ {
+ if (this != &str_sv)
+ {
+@@ -475,7 +476,8 @@
+
+ @return string length
+ */
+- size_type get(size_type idx, value_type* str, size_type buf_size) const;
++ size_type get(size_type idx,
++ value_type* str, size_type buf_size) const BMNOEXCEPT;
+
+ /*!
+ \brief set specified element with bounds checking and automatic resize
+@@ -568,7 +570,7 @@
+ }
+
+ /*! Swap content */
+- void swap(str_sparse_vector& str_sv) BMNOEXEPT;
++ void swap(str_sparse_vector& str_sv) BMNOEXCEPT;
+
+ ///@}
+
+@@ -589,7 +591,7 @@
+
+ \return 0 - equal, < 0 - vect[i] < str, >0 otherwise
+ */
+- int compare(size_type idx, const value_type* str) const;
++ int compare(size_type idx, const value_type* str) const BMNOEXCEPT;
+
+
+ /**
+@@ -596,7 +598,7 @@
+ \brief Find size of common prefix between two vector elements in octets
+ \return size of common prefix
+ */
+- unsigned common_prefix_length(size_type idx1, size_type idx2) const;
++ unsigned common_prefix_length(size_type idx1, size_type idx2) const BMNOEXCEPT;
+
+ ///@}
+
+@@ -606,7 +608,7 @@
+ ///@{
+
+ /*! \brief resize to zero, free memory */
+- void clear() BMNOEXEPT;
++ void clear() BMNOEXCEPT;
+
+ /*!
+ \brief clear range (assign bit 0 for all plains)
+@@ -650,13 +652,11 @@
+ static size_type max_str() { return sv_octet_plains; }
+
+ /*! \brief get effective string length used in vector
+-
+- Method returns efficiency, how close are we
+- to reserved maximum.
+-
++ Calculate and returns efficiency, how close are we
++ to the reserved maximum.
+ \return current string length maximum
+ */
+- size_type effective_max_str() const;
++ size_type effective_max_str() const BMNOEXCEPT;
+
+ /*! \brief get effective string length used in vector
+ \return current string length maximum
+@@ -691,7 +691,9 @@
+
+ @sa statistics
+ */
+- void calc_stat(struct str_sparse_vector<CharType, BV, MAX_STR_SIZE>::statistics* st) const;
++ void calc_stat(
++ struct str_sparse_vector<CharType, BV, MAX_STR_SIZE>::statistics* st
++ ) const BMNOEXCEPT;
+
+
+ ///@}
+@@ -701,15 +703,15 @@
+ //@{
+
+ /** Provide const iterator access to container content */
+- const_iterator begin() const;
++ const_iterator begin() const BMNOEXCEPT;
+
+ /** Provide const iterator access to the end */
+- const_iterator end() const { return const_iterator(this, bm::id_max); }
++ const_iterator end() const BMNOEXCEPT { return const_iterator(this, bm::id_max); }
+
+ /** Get const_itertor re-positioned to specific element
+ @param idx - position in the sparse vector
+ */
+- const_iterator get_const_iterator(size_type idx) const
++ const_iterator get_const_iterator(size_type idx) const BMNOEXCEPT
+ { return const_iterator(this, idx); }
+
+ /** Provide back insert iterator
+@@ -730,7 +732,7 @@
+ /** \brief trait if sparse vector is "compressed" (false)
+ */
+ static
+- bool is_compressed() { return false; }
++ bool is_compressed() BMNOEXCEPT { return false; }
+
+ ///@}
+
+@@ -745,7 +747,7 @@
+ /**
+ Get remapping status (true|false)
+ */
+- bool is_remap() const { return remap_flags_ != 0; }
++ bool is_remap() const BMNOEXCEPT { return remap_flags_ != 0; }
+
+ /**
+ Build remapping profile and load content from another sparse vector
+@@ -757,7 +759,7 @@
+ Calculate flags which octets are present on each byte-plain.
+ @internal
+ */
+- void calc_octet_stat(plain_octet_matrix_type& octet_matrix) const;
++ void calc_octet_stat(plain_octet_matrix_type& octet_matrix) const BMNOEXCEPT;
+
+ static
+ void build_octet_remap(
+@@ -771,10 +773,11 @@
+ @internal
+ */
+ static
+- bool remap_tosv(value_type* sv_str,
+- size_type buf_size,
+- const value_type* str,
+- const plain_octet_matrix_type& octet_remap_matrix2);
++ bool remap_tosv(value_type* BMRESTRICT sv_str,
++ size_type buf_size,
++ const value_type* BMRESTRICT str,
++ const plain_octet_matrix_type& BMRESTRICT octet_remap_matrix2
++ ) BMNOEXCEPT;
+
+ /*!
+ remap string from external (ASCII) system to matrix internal code
+@@ -782,7 +785,7 @@
+ */
+ bool remap_tosv(value_type* sv_str,
+ size_type buf_size,
+- const value_type* str) const
++ const value_type* str) const BMNOEXCEPT
+ {
+ return remap_tosv(sv_str, buf_size, str, remap_matrix2_);
+ }
+@@ -793,10 +796,12 @@
+ @internal
+ */
+ static
+- bool remap_fromsv(value_type* str,
+- size_type buf_size,
+- const value_type* sv_str,
+- const plain_octet_matrix_type& octet_remap_matrix1);
++ bool remap_fromsv(
++ value_type* BMRESTRICT str,
++ size_type buf_size,
++ const value_type* BMRESTRICT sv_str,
++ const plain_octet_matrix_type& BMRESTRICT octet_remap_matrix1
++ ) BMNOEXCEPT;
+ /*!
+ re-calculate remap matrix2 based on matrix1
+ @internal
+@@ -949,18 +954,18 @@
+ \return true, if it is the same
+ */
+ bool equal(const str_sparse_vector<CharType, BV, MAX_STR_SIZE>& sv,
+- bm::null_support null_able = bm::use_null) const;
++ bm::null_support null_able = bm::use_null) const BMNOEXCEPT;
+
+ /**
+ \brief find position of compressed element by its rank
+ */
+ static
+- bool find_rank(size_type rank, size_type& pos);
++ bool find_rank(size_type rank, size_type& pos) BMNOEXCEPT;
+
+ /**
+ \brief size of sparse vector (may be different for RSC)
+ */
+- size_type effective_size() const { return size(); }
++ size_type effective_size() const BMNOEXCEPT { return size(); }
+
+ protected:
+
+@@ -1138,7 +1143,8 @@
+ //---------------------------------------------------------------------
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::swap(str_sparse_vector& str_sv) BMNOEXEPT
++void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::swap(
++ str_sparse_vector& str_sv) BMNOEXCEPT
+ {
+ parent_type::swap(str_sv);
+ bm::xor_swap(remap_flags_, str_sv.remap_flags_);
+@@ -1287,7 +1293,7 @@
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::size_type
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>::get(
+- size_type idx, value_type* str, size_type buf_size) const
++ size_type idx, value_type* str, size_type buf_size) const BMNOEXCEPT
+ {
+ size_type i = 0;
+ for (; i < MAX_STR_SIZE; ++i)
+@@ -1330,7 +1336,8 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::calc_stat(
+- struct str_sparse_vector<CharType, BV, MAX_STR_SIZE>::statistics* st) const
++ struct str_sparse_vector<CharType, BV, MAX_STR_SIZE>::statistics* st
++ ) const BMNOEXCEPT
+ {
+ BM_ASSERT(st);
+ typename bvector_type::statistics stbv;
+@@ -1362,7 +1369,7 @@
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ int str_sparse_vector<CharType, BV, MAX_STR_SIZE>::compare(
+ size_type idx,
+- const value_type* str) const
++ const value_type* str) const BMNOEXCEPT
+ {
+ BM_ASSERT(str);
+ int res = 0;
+@@ -1390,7 +1397,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ unsigned str_sparse_vector<CharType, BV, MAX_STR_SIZE>::common_prefix_length(
+- size_type idx1, size_type idx2) const
++ size_type idx1, size_type idx2) const BMNOEXCEPT
+ {
+ unsigned i = 0;
+ for (; i < MAX_STR_SIZE; ++i)
+@@ -1416,8 +1423,9 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ bool
+-str_sparse_vector<CharType, BV, MAX_STR_SIZE>::find_rank(size_type rank,
+- size_type& pos)
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::find_rank(
++ size_type rank,
++ size_type& pos) BMNOEXCEPT
+ {
+ BM_ASSERT(rank);
+ pos = rank - 1;
+@@ -1428,7 +1436,8 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::size_type
+-str_sparse_vector<CharType, BV, MAX_STR_SIZE>::effective_max_str() const
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::effective_max_str()
++ const BMNOEXCEPT
+ {
+ for (int i = MAX_STR_SIZE-1; i >= 0; --i)
+ {
+@@ -1446,7 +1455,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::calc_octet_stat(
+- plain_octet_matrix_type& octet_matrix) const
++ plain_octet_matrix_type& octet_matrix) const BMNOEXCEPT
+ {
+ octet_matrix.init();
+ octet_matrix.set_zero();
+@@ -1531,10 +1540,10 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ bool str_sparse_vector<CharType, BV, MAX_STR_SIZE>::remap_tosv(
+- value_type* sv_str,
+- size_type buf_size,
+- const value_type* str,
+- const plain_octet_matrix_type& octet_remap_matrix2)
++ value_type* BMRESTRICT sv_str,
++ size_type buf_size,
++ const value_type* BMRESTRICT str,
++ const plain_octet_matrix_type& BMRESTRICT octet_remap_matrix2) BMNOEXCEPT
+ {
+ for (unsigned i = 0; i < buf_size; ++i)
+ {
+@@ -1559,10 +1568,11 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ bool str_sparse_vector<CharType, BV, MAX_STR_SIZE>::remap_fromsv(
+- value_type* str,
+- size_type buf_size,
+- const value_type* sv_str,
+- const plain_octet_matrix_type& octet_remap_matrix1)
++ value_type* BMRESTRICT str,
++ size_type buf_size,
++ const value_type* BMRESTRICT sv_str,
++ const plain_octet_matrix_type& BMRESTRICT octet_remap_matrix1
++ ) BMNOEXCEPT
+ {
+ for (unsigned i = 0; i < buf_size; ++i)
+ {
+@@ -1586,7 +1596,8 @@
+ //---------------------------------------------------------------------
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::remap_from(const str_sparse_vector& str_sv)
++void
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::remap_from(const str_sparse_vector& str_sv)
+ {
+ if (str_sv.is_remap())
+ {
+@@ -1639,7 +1650,7 @@
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ bool str_sparse_vector<CharType, BV, MAX_STR_SIZE>::equal(
+ const str_sparse_vector<CharType, BV, MAX_STR_SIZE>& sv,
+- bm::null_support null_able) const
++ bm::null_support null_able) const BMNOEXCEPT
+ {
+ // at this point both vectors should have the same remap settings
+ // to be considered "equal".
+@@ -1686,7 +1697,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator
+-str_sparse_vector<CharType, BV, MAX_STR_SIZE>::begin() const
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::begin() const BMNOEXCEPT
+ {
+ typedef typename
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator it_type;
+@@ -1696,7 +1707,7 @@
+ //---------------------------------------------------------------------
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::clear() BMNOEXEPT
++void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::clear() BMNOEXCEPT
+ {
+ parent_type::clear();
+ }
+@@ -1736,7 +1747,7 @@
+
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::const_iterator()
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::const_iterator() BMNOEXCEPT
+ : sv_(0), pos_(bm::id_max), pos_in_buf_(~size_type(0))
+ {}
+
+@@ -1744,7 +1755,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::const_iterator(
+- const str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator& it)
++ const str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator& it) BMNOEXCEPT
+ : sv_(it.sv_), pos_(it.pos_), pos_in_buf_(~size_type(0))
+ {}
+
+@@ -1752,7 +1763,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::const_iterator(
+- const str_sparse_vector<CharType, BV, MAX_STR_SIZE>* sv)
++ const str_sparse_vector<CharType, BV, MAX_STR_SIZE>* sv) BMNOEXCEPT
+ : sv_(sv), pos_(sv->empty() ? bm::id_max : 0), pos_in_buf_(~size_type(0))
+ {}
+
+@@ -1761,7 +1772,7 @@
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::const_iterator(
+ const str_sparse_vector<CharType, BV, MAX_STR_SIZE>* sv,
+- typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::size_type pos)
++ typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::size_type pos) BMNOEXCEPT
+ : sv_(sv), pos_(pos >= sv->size() ? bm::id_max : pos), pos_in_buf_(~size_type(0))
+ {}
+
+@@ -1769,7 +1780,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ const typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::value_type*
+-str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::value() const
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::value() const BMNOEXCEPT
+ {
+ BM_ASSERT(sv_);
+ BM_ASSERT(this->valid());
+@@ -1791,8 +1802,10 @@
+ //---------------------------------------------------------------------
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::go_to(
+- typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::size_type pos)
++void
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::go_to(
++ typename str_sparse_vector<CharType, BV, MAX_STR_SIZE>::size_type pos
++ ) BMNOEXCEPT
+ {
+ pos_ = (!sv_ || pos >= sv_->size()) ? bm::id_max : pos;
+ pos_in_buf_ = ~size_type(0);
+@@ -1801,7 +1814,8 @@
+ //---------------------------------------------------------------------
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-void str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::advance()
++void
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::const_iterator::advance() BMNOEXCEPT
+ {
+ if (pos_ == bm::id_max) // nothing to do, we are at the end
+ return;
+@@ -1825,7 +1839,7 @@
+ //---------------------------------------------------------------------
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator::back_insert_iterator()
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator::back_insert_iterator() BMNOEXCEPT
+ : sv_(0), bv_null_(0), pos_in_buf_(~size_type(0)), prev_nb_(0)
+ {}
+
+@@ -1833,7 +1847,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator::back_insert_iterator(
+- str_sparse_vector<CharType, BV, MAX_STR_SIZE>* sv)
++ str_sparse_vector<CharType, BV, MAX_STR_SIZE>* sv) BMNOEXCEPT
+ : sv_(sv), pos_in_buf_(~size_type(0))
+ {
+ if (sv)
+@@ -1851,7 +1865,7 @@
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+ str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator::back_insert_iterator(
+-const str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator& bi)
++const str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator& bi) BMNOEXCEPT
+ : sv_(bi.sv_), bv_null_(bi.bv_null_), pos_in_buf_(~size_type(0)), prev_nb_(bi.prev_nb_)
+ {
+ BM_ASSERT(bi.empty());
+@@ -1868,7 +1882,9 @@
+ //---------------------------------------------------------------------
+
+ template<class CharType, class BV, unsigned MAX_STR_SIZE>
+-bool str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator::empty() const
++bool
++str_sparse_vector<CharType, BV, MAX_STR_SIZE>::back_insert_iterator::empty()
++ const BMNOEXCEPT
+ {
+ return (pos_in_buf_ == ~size_type(0) || !sv_);
+ }