diff --git a/Cargo.toml b/Cargo.toml
index 4742d54..784328b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,6 +15,7 @@ optional = true
 
 [dev-dependencies]
 rand = "0.7"
+typed_test_gen = "0.1"
 
 [features]
 default = ["parallel"]
diff --git a/src/atomic.rs b/src/atomic.rs
index 9ff8ac5..b8cb3d5 100644
--- a/src/atomic.rs
+++ b/src/atomic.rs
@@ -8,6 +8,12 @@ use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 use util::*;
 use {BitSetLike, DrainableBitSet};
 
+const SHIFT0: usize = usize::SHIFT0;
+const SHIFT1: usize = usize::SHIFT1;
+const SHIFT2: usize = usize::SHIFT2;
+const SHIFT3: usize = usize::SHIFT3;
+const LOG_BITS: usize = <usize as UnsignedInteger>::LOG_BITS as usize;
+
 /// This is similar to a [`BitSet`] but allows setting of value
 /// without unique ownership of the structure
 ///
@@ -46,7 +52,7 @@ impl AtomicBitSet {
     /// this will panic if the Index is out of range.
     #[inline]
     pub fn add_atomic(&self, id: Index) -> bool {
-        let (_, p1, p2) = offsets(id);
+        let (_, p1, p2) = offsets::<usize>(id);
 
         // While it is tempting to check if the bit was set and exit here if it
         // was, this can result in a data race. If this thread and another
@@ -65,14 +71,19 @@ impl AtomicBitSet {
     pub fn add(&mut self, id: Index) -> bool {
         use std::sync::atomic::Ordering::Relaxed;
 
-        let (_, p1, p2) = offsets(id);
+        let (_, p1, p2) = offsets::<usize>(id);
         if self.layer1[p1].add(id) {
             return true;
         }
 
-        self.layer2[p2].store(self.layer2[p2].load(Relaxed) | id.mask(SHIFT2), Relaxed);
-        self.layer3
-            .store(self.layer3.load(Relaxed) | id.mask(SHIFT3), Relaxed);
+        self.layer2[p2].store(
+            self.layer2[p2].load(Relaxed) | id.mask::<usize>(SHIFT2),
+            Relaxed,
+        );
+        self.layer3.store(
+            self.layer3.load(Relaxed) | id.mask::<usize>(SHIFT3),
+            Relaxed,
+        );
         false
     }
 
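Before the `remove` hunk below, it may help to see the index arithmetic that `offsets::<usize>` and `Row::mask::<usize>` perform in this patch. This is a minimal standalone sketch, assuming a 64-bit `usize` (`LOG_BITS = 6`); the constant names mirror the diff but nothing here is part of the patch itself.

```rust
/// Standalone sketch of the layer arithmetic behind `offsets::<usize>` and
/// `Row::mask::<usize>`, assuming 64-bit words (LOG_BITS = 6).
fn main() {
    const LOG_BITS: usize = 6; // log2(64)
    const SHIFT1: usize = LOG_BITS; // start of layer1 rows
    const SHIFT2: usize = 2 * LOG_BITS; // start of layer2 rows
    const SHIFT3: usize = 3 * LOG_BITS; // start of layer3 rows

    let id: u32 = 1000;
    // Word index inside each layer (what `offsets` returns as (p0, p1, p2)).
    let p0 = (id >> SHIFT1) as usize;
    let p1 = (id >> SHIFT2) as usize;
    let p2 = (id >> SHIFT3) as usize;
    // Single-bit mask inside the layer0 word (what `id.mask::<usize>(SHIFT0)` computes).
    let mask0: usize = 1 << (id as usize & ((1 << LOG_BITS) - 1));

    println!("p0={} p1={} p2={} mask0={:#x}", p0, p1, p2, mask0);
}
```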
@@ -82,7 +93,7 @@ impl AtomicBitSet {
     #[inline]
     pub fn remove(&mut self, id: Index) -> bool {
         use std::sync::atomic::Ordering::Relaxed;
-        let (_, p1, p2) = offsets(id);
+        let (_, p1, p2) = offsets::<usize>(id);
 
         // if the bitmask was set we need to clear
         // its bit from layer0 to 3. the layers above only
@@ -98,13 +109,13 @@ impl AtomicBitSet {
             return true;
         }
 
-        let v = self.layer2[p2].load(Relaxed) & !id.mask(SHIFT2);
+        let v = self.layer2[p2].load(Relaxed) & !id.mask::<usize>(SHIFT2);
         self.layer2[p2].store(v, Relaxed);
         if v != 0 {
             return true;
         }
 
-        let v = self.layer3.load(Relaxed) & !id.mask(SHIFT3);
+        let v = self.layer3.load(Relaxed) & !id.mask::<usize>(SHIFT3);
         self.layer3.store(v, Relaxed);
         return true;
     }
@@ -141,7 +152,7 @@ impl AtomicBitSet {
             if m3 != 0 {
                 let bit = m3.trailing_zeros() as usize;
                 m3 &= !(1 << bit);
-                offset = bit << BITS;
+                offset = bit << LOG_BITS;
                 m2 = self.layer2[bit].swap(0, Ordering::Relaxed);
                 continue;
             }
@@ -151,6 +162,8 @@ impl AtomicBitSet {
 }
 
 impl BitSetLike for AtomicBitSet {
+    type Underlying = usize;
+
     #[inline]
     fn layer3(&self) -> usize {
         self.layer3.load(Ordering::Relaxed)
@@ -165,7 +178,7 @@ impl BitSetLike for AtomicBitSet {
     }
     #[inline]
     fn layer0(&self, i: usize) -> usize {
-        let (o1, o0) = (i >> BITS, i & ((1 << BITS) - 1));
+        let (o1, o0) = (i >> LOG_BITS, i & ((1 << LOG_BITS) - 1));
         self.layer1[o1]
             .atom
             .get()
@@ -191,19 +204,19 @@ impl Default for AtomicBitSet {
             layer3: Default::default(),
             layer2: repeat(0)
                 .map(|_| AtomicUsize::new(0))
-                .take(1 << BITS)
+                .take(1 << LOG_BITS)
                 .collect(),
             layer1: repeat(0)
                 .map(|_| AtomicBlock::new())
-                .take(1 << (2 * BITS))
+                .take(1 << (2 * LOG_BITS))
                 .collect(),
         }
     }
 }
 
 struct OnceAtom {
-    inner: AtomicPtr<[AtomicUsize; 1 << BITS]>,
-    marker: PhantomData<Option<Box<[AtomicUsize; 1 << BITS]>>>,
+    inner: AtomicPtr<[AtomicUsize; 1 << LOG_BITS]>,
+    marker: PhantomData<Option<Box<[AtomicUsize; 1 << LOG_BITS]>>>,
 }
 
 impl Drop for OnceAtom {
@@ -225,11 +238,11 @@ impl OnceAtom {
         }
     }
 
-    fn get_or_init(&self) -> &[AtomicUsize; 1 << BITS] {
+    fn get_or_init(&self) -> &[AtomicUsize; 1 << LOG_BITS] {
         let current_ptr = self.inner.load(Ordering::Acquire);
         let ptr = if current_ptr.is_null() {
             const ZERO: AtomicUsize = AtomicUsize::new(0);
-            let new_ptr = Box::into_raw(Box::new([ZERO; 1 << BITS]));
+            let new_ptr = Box::into_raw(Box::new([ZERO; 1 << LOG_BITS]));
             if let Err(existing_ptr) = self.inner.compare_exchange(
                 ptr::null_mut(),
                 new_ptr,
@@ -256,7 +269,7 @@ impl OnceAtom {
         unsafe { &*ptr }
     }
 
-    fn get(&self) -> Option<&[AtomicUsize; 1 << BITS]> {
+    fn get(&self) -> Option<&[AtomicUsize; 1 << LOG_BITS]> {
         let ptr = self.inner.load(Ordering::Acquire);
         // SAFETY: If it is not null, we created this pointer from
         // `Box::into_raw` and only use it to create immutable references
@@ -264,7 +277,7 @@ impl OnceAtom {
         unsafe { ptr.as_ref() }
     }
 
-    fn get_mut(&mut self) -> Option<&mut [AtomicUsize; 1 << BITS]> {
+    fn get_mut(&mut self) -> Option<&mut [AtomicUsize; 1 << LOG_BITS]> {
         let ptr = self.inner.get_mut();
        // SAFETY: If this is not null, we created this pointer from
        // `Box::into_raw` and we have an exclusive borrow of self.
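The `OnceAtom` above is a lock-free lazy initializer: racing threads each allocate, one wins the `compare_exchange`, and losers free their allocation. Here is a minimal sketch of that pattern in isolation, assuming a fixed `[u64; 64]` block; the real `OnceAtom` also carries a `PhantomData` marker and a `Drop` impl, which this sketch omits (so it leaks the block).

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

/// Minimal sketch of the `OnceAtom` idea: lazily allocate a block once,
/// letting concurrent initializers race and freeing the losing allocation.
struct Once {
    inner: AtomicPtr<[u64; 64]>,
}

impl Once {
    fn new() -> Self {
        Once {
            inner: AtomicPtr::new(ptr::null_mut()),
        }
    }

    fn get_or_init(&self) -> &[u64; 64] {
        let current = self.inner.load(Ordering::Acquire);
        let ptr = if current.is_null() {
            let new_ptr = Box::into_raw(Box::new([0u64; 64]));
            match self.inner.compare_exchange(
                ptr::null_mut(),
                new_ptr,
                Ordering::AcqRel,  // publish our block on success
                Ordering::Acquire, // observe the winner's block on failure
            ) {
                Ok(_) => new_ptr,
                Err(existing) => {
                    // Another thread won the race; drop our allocation.
                    unsafe { drop(Box::from_raw(new_ptr)) };
                    existing
                }
            }
        } else {
            current
        };
        // SAFETY: the pointer came from `Box::into_raw` and is never freed
        // while `self` is alive. (A real implementation, like `OnceAtom`,
        // also needs a `Drop` impl to free the block.)
        unsafe { &*ptr }
    }
}

fn main() {
    let once = Once::new();
    let block = once.get_or_init();
    assert_eq!(block[0], 0);
}
```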
@@ -286,7 +299,7 @@ impl AtomicBlock {
     }
 
     fn add(&self, id: Index) -> bool {
-        let (i, m) = (id.row(SHIFT1), id.mask(SHIFT0));
+        let (i, m) = (id.row::<usize>(SHIFT1), id.mask::<usize>(SHIFT0));
         let old = self.atom.get_or_init()[i].fetch_or(m, Ordering::Relaxed);
         self.mask.fetch_or(id.mask(SHIFT1), Ordering::Relaxed);
         old & m != 0
@@ -295,20 +308,23 @@ impl AtomicBlock {
     fn contains(&self, id: Index) -> bool {
         self.atom
             .get()
-            .map(|layer0| layer0[id.row(SHIFT1)].load(Ordering::Relaxed) & id.mask(SHIFT0) != 0)
+            .map(|layer0| {
+                layer0[id.row::<usize>(SHIFT1)].load(Ordering::Relaxed) & id.mask::<usize>(SHIFT0)
+                    != 0
+            })
             .unwrap_or(false)
     }
 
     fn remove(&mut self, id: Index) -> bool {
         if let Some(layer0) = self.atom.get_mut() {
-            let (i, m) = (id.row(SHIFT1), !id.mask(SHIFT0));
+            let (i, m) = (id.row::<usize>(SHIFT1), !id.mask::<usize>(SHIFT0));
             let v = layer0[i].get_mut();
-            let was_set = *v & id.mask(SHIFT0) == id.mask(SHIFT0);
+            let was_set = *v & id.mask::<usize>(SHIFT0) == id.mask(SHIFT0);
             *v = *v & m;
             if *v == 0 {
                 // no other bits are set
                 // so unset bit in the next level up
-                *self.mask.get_mut() &= !id.mask(SHIFT1);
+                *self.mask.get_mut() &= !id.mask::<usize>(SHIFT1);
             }
             was_set
         } else {
diff --git a/src/iter/drain.rs b/src/iter/drain.rs
index 194a0dd..651a5b0 100644
--- a/src/iter/drain.rs
+++ b/src/iter/drain.rs
@@ -1,11 +1,11 @@
 use iter::BitIter;
 use util::*;
-use DrainableBitSet;
+use {BitSetLike, DrainableBitSet};
 
 /// A draining `Iterator` over a [`DrainableBitSet`] structure.
 ///
 /// [`DrainableBitSet`]: ../trait.DrainableBitSet.html
-pub struct DrainBitIter<'a, T: 'a> {
+pub struct DrainBitIter<'a, T: 'a + BitSetLike> {
     iter: BitIter<&'a mut T>,
 }
 
@@ -14,7 +14,7 @@ impl<'a, T: DrainableBitSet> DrainBitIter<'a, T> {
     /// but just [`.drain()`] on a bit set.
     ///
     /// [`.drain()`]: ../trait.DrainableBitSet.html#method.drain
-    pub fn new(set: &'a mut T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
+    pub fn new(set: &'a mut T, masks: [T::Underlying; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
         DrainBitIter {
             iter: BitIter::new(set, masks, prefix),
         }
@@ -36,10 +36,16 @@ where
     }
 }
 
-#[test]
-fn drain_all() {
-    use {BitSet, BitSetLike};
-    let mut bit_set: BitSet = (0..10000).filter(|i| i % 2 == 0).collect();
-    bit_set.drain().for_each(|_| {});
-    assert_eq!(0, bit_set.iter().count());
+#[cfg(test)]
+mod tests {
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
+    use {BitSetLike, DrainableBitSet, GenericBitSet, UnsignedInteger};
+
+    #[test_with(u32, u64, usize)]
+    fn drain_all<T: UnsignedInteger>() {
+        let mut bit_set: GenericBitSet<T> = (0..10000).filter(|i| i % 2 == 0).collect();
+        bit_set.drain().for_each(|_| {});
+        assert_eq!(0, bit_set.iter().count());
+    }
 }
diff --git a/src/iter/mod.rs b/src/iter/mod.rs
index b3e31fe..acc17be 100644
--- a/src/iter/mod.rs
+++ b/src/iter/mod.rs
@@ -1,5 +1,5 @@
 use util::*;
-use {BitSet, BitSetLike};
+use {BitSetLike, GenericBitSet};
 
 pub use self::drain::DrainBitIter;
 
@@ -14,18 +14,18 @@ mod parallel;
 ///
 /// [`BitSetLike`]: ../trait.BitSetLike.html
 #[derive(Debug, Clone)]
-pub struct BitIter<T> {
+pub struct BitIter<T: BitSetLike> {
     pub(crate) set: T,
-    pub(crate) masks: [usize; LAYERS],
+    pub(crate) masks: [T::Underlying; LAYERS],
     pub(crate) prefix: [u32; LAYERS - 1],
 }
 
-impl<T> BitIter<T> {
+impl<T: BitSetLike> BitIter<T> {
     /// Creates a new `BitIter`. You usually don't call this function
     /// but just [`.iter()`] on a bit set.
     ///
     /// [`.iter()`]: ../trait.BitSetLike.html#method.iter
-    pub fn new(set: T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
+    pub fn new(set: T, masks: [T::Underlying; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
         BitIter {
             set: set,
             masks: masks,
@@ -41,16 +41,16 @@ impl<T: BitSetLike> BitIter<T> {
     }
 }
 
-impl<'a> BitIter<&'a mut BitSet> {
+impl<'a, T: UnsignedInteger> BitIter<&'a mut GenericBitSet<T>> {
     /// Clears the rest of the bitset starting from the next inner layer.
     pub(crate) fn clear(&mut self) {
         use self::State::Continue;
         while let Some(level) = (1..LAYERS).find(|&level| self.handle_level(level) == Continue) {
             let lower = level - 1;
-            let idx = (self.prefix[lower] >> BITS) as usize;
-            *self.set.layer_mut(lower, idx) = 0;
+            let idx = (self.prefix[lower] >> T::LOG_BITS) as usize;
+            *self.set.layer_mut(lower, idx) = T::ZERO;
             if level == LAYERS - 1 {
-                self.set.layer3 &= !((2 << idx) - 1);
+                self.set.layer3 &= T::from_u64(!((2usize << idx) - 1) as u64);
             }
         }
     }
@@ -88,13 +88,13 @@ where
 impl<T: BitSetLike> BitIter<T> {
     pub(crate) fn handle_level(&mut self, level: usize) -> State {
         use self::State::*;
-        if self.masks[level] == 0 {
+        if self.masks[level] == T::Underlying::ZERO {
             Empty
         } else {
             // Take the first bit that isn't zero
             let first_bit = self.masks[level].trailing_zeros();
             // Remove it from the mask
-            self.masks[level] &= !(1 << first_bit);
+            self.masks[level] &= !(T::Underlying::ONE << T::Underlying::from_u32(first_bit));
             // Calculate the index of it
             let idx = self.prefix.get(level).cloned().unwrap_or(0) | first_bit;
             if level == 0 {
@@ -103,7 +103,7 @@ impl<T: BitSetLike> BitIter<T> {
             } else {
                 // Take the corresponding `usize` from the layer below
                 self.masks[level - 1] = self.set.get_from_layer(level - 1, idx as usize);
-                self.prefix[level - 1] = idx << BITS;
+                self.prefix[level - 1] = idx << T::Underlying::LOG_BITS;
                 Continue
             }
         }
@@ -112,34 +112,37 @@ impl<T: BitSetLike> BitIter<T> {
 
 #[cfg(test)]
 mod tests {
-    use {BitSet, BitSetLike};
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
+
+    use {BitSetLike, GenericBitSet, UnsignedInteger};
 
-    #[test]
-    fn iterator_clear_empties() {
+    #[test_with(u32, u64, usize)]
+    fn iterator_clear_empties<T: UnsignedInteger>() {
         use rand::prelude::*;
 
-        let mut set = BitSet::new();
+        let mut set = GenericBitSet::<T>::new();
         let mut rng = thread_rng();
         let limit = 1_048_576;
         for _ in 0..(limit / 10) {
             set.add(rng.gen_range(0, limit));
         }
         (&mut set).iter().clear();
-        assert_eq!(0, set.layer3);
+        assert_eq!(T::ZERO, set.layer3);
         for &i in &set.layer2 {
-            assert_eq!(0, i);
+            assert_eq!(T::ZERO, i);
         }
         for &i in &set.layer1 {
-            assert_eq!(0, i);
+            assert_eq!(T::ZERO, i);
         }
         for &i in &set.layer0 {
-            assert_eq!(0, i);
+            assert_eq!(T::ZERO, i);
         }
     }
 
-    #[test]
-    fn iterator_clone() {
-        let mut set = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn iterator_clone<T: UnsignedInteger>() {
+        let mut set = GenericBitSet::<T>::new();
         set.add(1);
         set.add(3);
         let iter = set.iter().skip(1);
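`handle_level` above is the engine behind `BitSetLike::iter`, walking the four layers top-down via each word's trailing zeros. A hedged usage sketch of the generic iterator this patch introduces (the crate and type names `hibitset`/`GenericBitSet` follow this diff; everything else is illustrative):

```rust
extern crate hibitset;

use hibitset::{BitSetLike, GenericBitSet};

fn main() {
    // A set backed by 32-bit words instead of the old hard-wired `usize`.
    let mut set = GenericBitSet::<u32>::new();
    for i in (0..100u32).step_by(7) {
        set.add(i);
    }
    // `iter` visits indices in ascending order by repeatedly taking the
    // lowest set bit of the current layer word (`handle_level` in this diff).
    let collected: Vec<u32> = set.iter().collect();
    assert_eq!(collected, (0..100u32).step_by(7).collect::<Vec<u32>>());
}
```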
diff --git a/src/iter/parallel.rs b/src/iter/parallel.rs
index c676ccf..e50093b 100644
--- a/src/iter/parallel.rs
+++ b/src/iter/parallel.rs
@@ -1,8 +1,9 @@
 use rayon::iter::plumbing::{bridge_unindexed, Folder, UnindexedConsumer, UnindexedProducer};
 use rayon::iter::ParallelIterator;
 
-use iter::{BitIter, BitSetLike, Index, BITS, LAYERS};
+use iter::{BitIter, Index, LAYERS};
 use util::average_ones;
+use {BitSetLike, UnsignedInteger};
 
 /// A `ParallelIterator` over a [`BitSetLike`] structure.
 ///
@@ -57,6 +58,7 @@ impl<T> BitParIter<T> {
 impl<T> ParallelIterator for BitParIter<T>
 where
     T: BitSetLike + Send + Sync,
+    <T as BitSetLike>::Underlying: Send + Sync,
 {
     type Item = Index;
 
@@ -72,11 +74,12 @@ where
 ///
 /// Usually used internally by `BitParIter`.
 #[derive(Debug)]
-pub struct BitProducer<'a, T: 'a + Send + Sync>(pub BitIter<&'a T>, pub u8);
+pub struct BitProducer<'a, T: 'a + BitSetLike>(pub BitIter<&'a T>, pub u8);
 
 impl<'a, T: 'a + Send + Sync> UnindexedProducer for BitProducer<'a, T>
 where
     T: BitSetLike,
+    <T as BitSetLike>::Underlying: Send + Sync,
 {
     type Item = Index;
 
@@ -112,7 +115,7 @@ where
         let splits = self.1;
         let other = {
             let mut handle_level = |level: usize| {
-                if self.0.masks[level] == 0 {
+                if self.0.masks[level] == T::Underlying::ZERO {
                     // Skip the empty layers
                     None
                 } else {
@@ -121,29 +124,35 @@ where
                     let first_bit = self.0.masks[level].trailing_zeros();
                     average_ones(self.0.masks[level])
                         .and_then(|average_bit| {
-                            let mask = (1 << average_bit) - 1;
+                            let mask = (T::Underlying::ONE << average_bit) - T::Underlying::ONE;
                             let mut other = BitProducer(
-                                BitIter::new(self.0.set, [0; LAYERS], [0; LAYERS - 1]),
+                                BitIter::new(
+                                    self.0.set,
+                                    [T::Underlying::ZERO; LAYERS],
+                                    [0; LAYERS - 1],
+                                ),
                                 splits,
                             );
                             // The `other` is the more significant half of the mask
                             other.0.masks[level] = self.0.masks[level] & !mask;
-                            other.0.prefix[level - 1] = (level_prefix | average_bit as u32) << BITS;
+                            other.0.prefix[level - 1] =
+                                (level_prefix | average_bit.to_u32()) << T::Underlying::LOG_BITS;
                             // The upper portion of the prefix is maintained, because the `other`
                             // will iterate the same subtree as the `self` does
                             other.0.prefix[level..].copy_from_slice(&self.0.prefix[level..]);
                             // And the `self` is the less significant one
                             self.0.masks[level] &= mask;
-                            self.0.prefix[level - 1] = (level_prefix | first_bit) << BITS;
+                            self.0.prefix[level - 1] =
+                                (level_prefix | first_bit) << T::Underlying::LOG_BITS;
                             Some(other)
                         })
                        .or_else(|| {
                             // Because there is only one bit left we descend to it
                             let idx = level_prefix as usize | first_bit as usize;
-                            self.0.prefix[level - 1] = (idx as u32) << BITS;
+                            self.0.prefix[level - 1] = (idx as u32) << T::Underlying::LOG_BITS;
                             // The level that is descended from doesn't have anything
                             // interesting so it can be skipped in the future.
-                            self.0.masks[level] = 0;
+                            self.0.masks[level] = T::Underlying::ZERO;
                             self.0.masks[level - 1] = self.0.set.get_from_layer(level - 1, idx);
                             None
                         })
@@ -169,16 +178,18 @@ where
 
 #[cfg(test)]
 mod test_bit_producer {
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
     use rayon::iter::plumbing::UnindexedProducer;
 
     use super::BitProducer;
-    use iter::BitSetLike;
-    use util::BITS;
+    use {BitSetLike, GenericBitSet, UnsignedInteger};
 
-    fn test_splitting(split_levels: u8) {
-        fn visit<T>(mut us: BitProducer<T>, d: usize, i: usize, mut trail: String, c: &mut usize)
+    fn test_splitting<T: UnsignedInteger>(split_levels: u8) {
+        fn visit<T, S>(mut us: BitProducer<S>, d: usize, i: usize, mut trail: String, c: &mut usize)
         where
-            T: Send + Sync + BitSetLike,
+            T: Send + Sync + UnsignedInteger,
+            S: Send + Sync + BitSetLike<Underlying = T>,
         {
             if d == 0 {
                 assert!(us.split().1.is_none(), "{}", trail);
@@ -193,14 +204,14 @@ mod test_bit_producer {
                 visit(them, d, i - j, trail, c);
             }
             trail.push_str("u");
-            visit(us, d - 1, BITS, trail, c);
+            visit(us, d - 1, T::LOG_BITS, trail, c);
         }
     }
 
-        let usize_bits = ::std::mem::size_of::<usize>() * 8;
+        let uint_bits = ::std::mem::size_of::<T>() * 8;
 
-        let mut c = ::BitSet::new();
-        for i in 0..(usize_bits.pow(3) * 2) {
+        let mut c = GenericBitSet::<T>::new();
+        for i in 0..(uint_bits.pow(3) * 2) {
             assert!(!c.add(i as u32));
         }
 
@@ -211,32 +222,32 @@ mod test_bit_producer {
         visit(
             us,
             split_levels as usize - 1,
-            BITS,
+            T::LOG_BITS,
             "u".to_owned(),
            &mut count,
         );
         visit(
             them.expect("Splitting top level"),
             split_levels as usize - 1,
-            BITS,
+            T::LOG_BITS,
             "t".to_owned(),
             &mut count,
         );
-        assert_eq!(usize_bits.pow(split_levels as u32 - 1) * 2, count);
+        assert_eq!(uint_bits.pow(split_levels as u32 - 1) * 2, count);
     }
 
-    #[test]
-    fn max_3_splitting_of_two_top_bits() {
-        test_splitting(3);
+    #[test_with(u32, u64, usize)]
+    fn max_3_splitting_of_two_top_bits<T: UnsignedInteger>() {
+        test_splitting::<T>(3);
     }
 
-    #[test]
-    fn max_2_splitting_of_two_top_bits() {
-        test_splitting(2);
+    #[test_with(u32, u64, usize)]
+    fn max_2_splitting_of_two_top_bits<T: UnsignedInteger>() {
+        test_splitting::<T>(2);
     }
 
-    #[test]
-    fn max_1_splitting_of_two_top_bits() {
-        test_splitting(1);
+    #[test_with(u32, u64, usize)]
+    fn max_1_splitting_of_two_top_bits<T: UnsignedInteger>() {
+        test_splitting::<T>(1);
     }
 }
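The splitting above is what `BitParIter` uses to hand halves of the keyspace to rayon. A hedged consumption sketch, assuming the crate is built with the `parallel` feature and that the generic type is reachable as `hibitset::GenericBitSet` (names follow this diff, the rest is illustrative):

```rust
extern crate hibitset;
extern crate rayon;

use hibitset::{BitSetLike, GenericBitSet};
use rayon::iter::ParallelIterator;

fn main() {
    let mut set = GenericBitSet::<u64>::new();
    for i in 0..1_000_000u32 {
        if i % 3 == 0 {
            set.add(i);
        }
    }
    // `par_iter` splits work by picking a bit near the middle of the set
    // bits of a layer word (`average_ones`), so both halves stay busy.
    let count = set.par_iter().count();
    assert_eq!(count, (0..1_000_000u32).filter(|i| i % 3 == 0).count());
}
```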
diff --git a/src/lib.rs b/src/lib.rs
index ad2538b..5618eba 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -37,10 +37,10 @@
 //! below it can be skipped.)
 //!
 //! However, there is a maximum on index size. The top layer (Layer 3)
-//! of the BitSet is a single `usize` long. This makes the maximum index
-//! `usize**4` (`1,048,576` for a 32-bit `usize`, `16,777,216` for a
-//! 64-bit `usize`). Attempting to add indices larger than that will cause
-//! the `BitSet` to panic.
+//! of the BitSet is a single integer wide (its width depends on the underlying type used).
+//! This makes the maximum index `BITS**4` (`1,048,576` for a 32-bit `usize`,
+//! `16,777,216` for a 64-bit `usize`). Attempting to add indices larger than that
+//! will cause the `BitSet` to panic.
 //!
 
 #![deny(missing_docs)]
@@ -63,36 +63,48 @@ pub use ops::{BitSetAll, BitSetAnd, BitSetNot, BitSetOr, BitSetXor};
 
 use util::*;
 
+/// A `GenericBitSet` is a simple set designed to track which indices are placed
+/// into it. It is based on an underlying type `T` which is supposed to represent an
+/// unsigned integer type (in particular `u32`, `u64`, or `usize`).
+///
+/// Note, a `BitSet` is limited by design to only `T::BITS**4` indices.
+/// Adding beyond this limit will cause the `BitSet` to panic.
+#[derive(Clone, Debug, Default)]
+pub struct GenericBitSet<T: UnsignedInteger> {
+    layer3: T,
+    layer2: Vec<T>,
+    layer1: Vec<T>,
+    layer0: Vec<T>,
+}
+
 /// A `BitSet` is a simple set designed to track which indices are placed
 /// into it.
 ///
 /// Note, a `BitSet` is limited by design to only `usize**4` indices.
 /// Adding beyond this limit will cause the `BitSet` to panic.
-#[derive(Clone, Debug, Default)]
-pub struct BitSet {
-    layer3: usize,
-    layer2: Vec<usize>,
-    layer1: Vec<usize>,
-    layer0: Vec<usize>,
-}
+pub type BitSet = GenericBitSet<usize>;
 
-impl BitSet {
+impl<T: UnsignedInteger> GenericBitSet<T> {
     /// Creates an empty `BitSet`.
-    pub fn new() -> BitSet {
+    pub fn new() -> Self {
         Default::default()
     }
 
     #[inline]
     fn valid_range(max: Index) {
-        if (MAX_EID as u32) < max {
-            panic!("Expected index to be less then {}, found {}", MAX_EID, max);
+        if T::MAX_EID < max {
+            panic!(
+                "Expected index to be less than {}, found {}",
+                T::MAX_EID,
+                max
+            );
         }
     }
 
     /// Creates an empty `BitSet`, preallocated for up to `max` indices.
-    pub fn with_capacity(max: Index) -> BitSet {
+    pub fn with_capacity(max: Index) -> Self {
         Self::valid_range(max);
-        let mut value = BitSet::new();
+        let mut value = Self::new();
         value.extend(max);
         value
     }
@@ -100,16 +112,16 @@ impl BitSet {
     #[inline(never)]
     fn extend(&mut self, id: Index) {
         Self::valid_range(id);
-        let (p0, p1, p2) = offsets(id);
+        let (p0, p1, p2) = offsets::<T>(id);
 
         Self::fill_up(&mut self.layer2, p2);
         Self::fill_up(&mut self.layer1, p1);
         Self::fill_up(&mut self.layer0, p0);
     }
 
-    fn fill_up(vec: &mut Vec<usize>, upper_index: usize) {
+    fn fill_up(vec: &mut Vec<T>, upper_index: usize) {
         if vec.len() <= upper_index {
-            vec.resize(upper_index + 1, 0);
+            vec.resize(upper_index + 1, T::ZERO);
         }
     }
 
@@ -117,23 +129,23 @@ impl BitSet {
     /// when the lowest layer was set from 0.
     #[inline(never)]
     fn add_slow(&mut self, id: Index) {
-        let (_, p1, p2) = offsets(id);
-        self.layer1[p1] |= id.mask(SHIFT1);
-        self.layer2[p2] |= id.mask(SHIFT2);
-        self.layer3 |= id.mask(SHIFT3);
+        let (_, p1, p2) = offsets::<T>(id);
+        self.layer1[p1] |= id.mask::<T>(T::SHIFT1);
+        self.layer2[p2] |= id.mask::<T>(T::SHIFT2);
+        self.layer3 |= id.mask::<T>(T::SHIFT3);
     }
 
     /// Adds `id` to the `BitSet`. Returns `true` if the value was
     /// already in the set.
     #[inline]
     pub fn add(&mut self, id: Index) -> bool {
-        let (p0, mask) = (id.offset(SHIFT1), id.mask(SHIFT0));
+        let (p0, mask) = (id.offset(T::SHIFT1), id.mask::<T>(T::SHIFT0));
 
         if p0 >= self.layer0.len() {
             self.extend(id);
         }
 
-        if self.layer0[p0] & mask != 0 {
+        if self.layer0[p0] & mask != T::ZERO {
             return true;
         }
 
@@ -141,13 +153,13 @@ impl BitSet {
         // that the value can be found here.
         let old = self.layer0[p0];
         self.layer0[p0] |= mask;
-        if old == 0 {
+        if old == T::ZERO {
             self.add_slow(id);
         }
         false
     }
 
-    fn layer_mut(&mut self, level: usize, idx: usize) -> &mut usize {
+    fn layer_mut(&mut self, level: usize, idx: usize) -> &mut T {
         match level {
             0 => {
                 Self::fill_up(&mut self.layer0, idx);
@@ -171,13 +183,13 @@ impl BitSet {
     /// to begin with.
     #[inline]
     pub fn remove(&mut self, id: Index) -> bool {
-        let (p0, p1, p2) = offsets(id);
+        let (p0, p1, p2) = offsets::<T>(id);
 
         if p0 >= self.layer0.len() {
             return false;
         }
 
-        if self.layer0[p0] & id.mask(SHIFT0) == 0 {
+        if self.layer0[p0] & id.mask::<T>(T::SHIFT0) == T::ZERO {
             return false;
         }
 
@@ -185,35 +197,38 @@ impl BitSet {
         // if the bitmask was set we need to clear
         // its bit from layer0 to 3. the layers above only
         // should be cleared if the bit cleared was the last bit
         // in its set
-        self.layer0[p0] &= !id.mask(SHIFT0);
-        if self.layer0[p0] != 0 {
+        self.layer0[p0] &= !id.mask::<T>(T::SHIFT0);
+        if self.layer0[p0] != T::ZERO {
             return true;
         }
 
-        self.layer1[p1] &= !id.mask(SHIFT1);
-        if self.layer1[p1] != 0 {
+        self.layer1[p1] &= !id.mask::<T>(T::SHIFT1);
+        if self.layer1[p1] != T::ZERO {
             return true;
         }
 
-        self.layer2[p2] &= !id.mask(SHIFT2);
-        if self.layer2[p2] != 0 {
+        self.layer2[p2] &= !id.mask::<T>(T::SHIFT2);
+        if self.layer2[p2] != T::ZERO {
             return true;
         }
 
-        self.layer3 &= !id.mask(SHIFT3);
+        self.layer3 &= !id.mask::<T>(T::SHIFT3);
         return true;
     }
 
     /// Returns `true` if `id` is in the set.
     #[inline]
     pub fn contains(&self, id: Index) -> bool {
-        let p0 = id.offset(SHIFT1);
-        p0 < self.layer0.len() && (self.layer0[p0] & id.mask(SHIFT0)) != 0
+        let p0 = id.offset(T::SHIFT1);
+        p0 < self.layer0.len() && (self.layer0[p0] & id.mask::<T>(T::SHIFT0)) != T::ZERO
     }
 
     /// Returns `true` if all ids in `other` are contained in this set
     #[inline]
-    pub fn contains_set(&self, other: &BitSet) -> bool {
+    pub fn contains_set<S>(&self, other: &S) -> bool
+    where
+        S: BitSetLike,
+    {
         for id in other.iter() {
             if !self.contains(id) {
                 return false;
             }
         }
         true
     }
 
     /// Completely wipes out the bit set.
     pub fn clear(&mut self) {
         self.layer0.clear();
         self.layer1.clear();
         self.layer2.clear();
-        self.layer3 = 0;
+        self.layer3 = T::ZERO;
     }
 
     /// How many bits are in a `usize`.
@@ -282,7 +297,7 @@ impl BitSet {
     ///
     /// assert_eq!(slice[slice_index], 1 << bit_at_index);
     /// ```
-    pub fn layer0_as_slice(&self) -> &[usize] {
+    pub fn layer0_as_slice(&self) -> &[T] {
         self.layer0.as_slice()
     }
 
@@ -328,7 +343,7 @@ impl BitSet {
     ///
     /// assert_eq!(slice[slice_index], 1 << bit_at_index);
     /// ```
-    pub fn layer1_as_slice(&self) -> &[usize] {
+    pub fn layer1_as_slice(&self) -> &[T] {
         self.layer1.as_slice()
     }
 
@@ -373,7 +388,7 @@ impl BitSet {
     ///
     /// assert_eq!(slice[slice_index], 1 << bit_at_index);
     /// ```
-    pub fn layer2_as_slice(&self) -> &[usize] {
+    pub fn layer2_as_slice(&self) -> &[T] {
         self.layer2.as_slice()
     }
 }
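After this change `contains_set` accepts any `BitSetLike`, not just another `BitSet`; it is a subset test. A hedged sketch of the generalized API (type and method names follow this diff, the scenario is illustrative):

```rust
extern crate hibitset;

use hibitset::GenericBitSet;

fn main() {
    let mut big = GenericBitSet::<usize>::new();
    let mut small = GenericBitSet::<usize>::new();
    for i in 0..100u32 {
        big.add(i);
    }
    for i in (0..100u32).step_by(10) {
        small.add(i);
    }
    // `contains_set` is a subset check: every id of `small` must be in `big`.
    assert!(big.contains_set(&small));
    assert!(!small.contains_set(&big));
}
```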
@@ -392,10 +407,13 @@ impl BitSet {
 ///
 /// [`BitSetLike`]: ../trait.BitSetLike.html
 pub trait BitSetLike {
+    /// Type of the underlying bit storage
+    type Underlying: UnsignedInteger;
+
     /// Gets the `usize` corresponding to layer and index.
     ///
     /// The `layer` should be in the range [0, 3]
-    fn get_from_layer(&self, layer: usize, idx: usize) -> usize {
+    fn get_from_layer(&self, layer: usize, idx: usize) -> Self::Underlying {
         match layer {
             0 => self.layer0(idx),
             1 => self.layer1(idx),
@@ -407,24 +425,24 @@ pub trait BitSetLike {
 
     /// Returns true if this `BitSetLike` contains nothing, and false otherwise.
     fn is_empty(&self) -> bool {
-        self.layer3() == 0
+        self.layer3() == Self::Underlying::ZERO
     }
 
     /// Return a `usize` where each bit represents if any word in layer2
     /// has been set.
-    fn layer3(&self) -> usize;
+    fn layer3(&self) -> Self::Underlying;
 
     /// Return the `usize` from the array of usizes that indicates if any
     /// bit has been set in layer1
-    fn layer2(&self, i: usize) -> usize;
+    fn layer2(&self, i: usize) -> Self::Underlying;
 
     /// Return the `usize` from the array of usizes that indicates if any
     /// bit has been set in layer0
-    fn layer1(&self, i: usize) -> usize;
+    fn layer1(&self, i: usize) -> Self::Underlying;
 
     /// Return a `usize` that maps to the direct 1:1 association with
     /// each index of the set
-    fn layer0(&self, i: usize) -> usize;
+    fn layer0(&self, i: usize) -> Self::Underlying;
 
     /// Allows checking if set bit is contained in the bit set.
     fn contains(&self, i: Index) -> bool;
 
@@ -436,7 +454,16 @@ pub trait BitSetLike {
     {
         let layer3 = self.layer3();
 
-        BitIter::new(self, [0, 0, 0, layer3], [0; LAYERS - 1])
+        BitIter::new(
+            self,
+            [
+                Self::Underlying::ZERO,
+                Self::Underlying::ZERO,
+                Self::Underlying::ZERO,
+                layer3,
+            ],
+            [0; LAYERS - 1],
+        )
     }
 
     /// Create a parallel iterator that will scan over the keyspace
@@ -463,7 +490,16 @@ pub trait DrainableBitSet: BitSetLike {
     {
         let layer3 = self.layer3();
 
-        DrainBitIter::new(self, [0, 0, 0, layer3], [0; LAYERS - 1])
+        DrainBitIter::new(
+            self,
+            [
+                Self::Underlying::ZERO,
+                Self::Underlying::ZERO,
+                Self::Underlying::ZERO,
+                layer3,
+            ],
+            [0; LAYERS - 1],
+        )
     }
 }
 
@@ -471,23 +507,25 @@ impl<'a, T> BitSetLike for &'a T
 where
     T: BitSetLike + ?Sized,
 {
+    type Underlying = T::Underlying;
+
     #[inline]
-    fn layer3(&self) -> usize {
+    fn layer3(&self) -> T::Underlying {
         (*self).layer3()
     }
 
     #[inline]
-    fn layer2(&self, i: usize) -> usize {
+    fn layer2(&self, i: usize) -> T::Underlying {
         (*self).layer2(i)
     }
 
     #[inline]
-    fn layer1(&self, i: usize) -> usize {
+    fn layer1(&self, i: usize) -> T::Underlying {
         (*self).layer1(i)
     }
 
     #[inline]
-    fn layer0(&self, i: usize) -> usize {
+    fn layer0(&self, i: usize) -> T::Underlying {
         (*self).layer0(i)
     }
 
@@ -501,23 +539,25 @@ impl<'a, T> BitSetLike for &'a mut T
 where
     T: BitSetLike + ?Sized,
 {
+    type Underlying = T::Underlying;
+
     #[inline]
-    fn layer3(&self) -> usize {
+    fn layer3(&self) -> T::Underlying {
         (**self).layer3()
     }
 
     #[inline]
-    fn layer2(&self, i: usize) -> usize {
+    fn layer2(&self, i: usize) -> T::Underlying {
         (**self).layer2(i)
     }
 
     #[inline]
-    fn layer1(&self, i: usize) -> usize {
+    fn layer1(&self, i: usize) -> T::Underlying {
         (**self).layer1(i)
     }
 
     #[inline]
-    fn layer0(&self, i: usize) -> usize {
+    fn layer0(&self, i: usize) -> T::Underlying {
         (**self).layer0(i)
     }
 
@@ -537,25 +577,27 @@ where
     }
 }
 
-impl BitSetLike for BitSet {
+impl<T: UnsignedInteger> BitSetLike for GenericBitSet<T> {
+    type Underlying = T;
+
     #[inline]
-    fn layer3(&self) -> usize {
+    fn layer3(&self) -> T {
         self.layer3
     }
 
     #[inline]
-    fn layer2(&self, i: usize) -> usize {
-        self.layer2.get(i).map(|&x| x).unwrap_or(0)
+    fn layer2(&self, i: usize) -> T {
+        self.layer2.get(i).map(|&x| x).unwrap_or(T::ZERO)
     }
 
     #[inline]
-    fn layer1(&self, i: usize) -> usize {
-        self.layer1.get(i).map(|&x| x).unwrap_or(0)
+    fn layer1(&self, i: usize) -> T {
+        self.layer1.get(i).map(|&x| x).unwrap_or(T::ZERO)
     }
 
     #[inline]
-    fn layer0(&self, i: usize) -> usize {
-        self.layer0.get(i).map(|&x| x).unwrap_or(0)
+    fn layer0(&self, i: usize) -> T {
+        self.layer0.get(i).map(|&x| x).unwrap_or(T::ZERO)
     }
 
     #[inline]
@@ -564,16 +606,16 @@ impl BitSetLike for BitSet {
     }
 }
 
-impl DrainableBitSet for BitSet {
+impl<T: UnsignedInteger> DrainableBitSet for GenericBitSet<T> {
     #[inline]
     fn remove(&mut self, i: Index) -> bool {
         self.remove(i)
     }
 }
 
-impl PartialEq for BitSet {
+impl<T: UnsignedInteger> PartialEq for GenericBitSet<T> {
     #[inline]
-    fn eq(&self, rhv: &BitSet) -> bool {
+    fn eq(&self, rhv: &GenericBitSet<T>) -> bool {
         if self.layer3 != rhv.layer3 {
             return false;
         }
@@ -603,15 +645,18 @@ impl PartialEq for BitSet {
         true
     }
 }
-impl Eq for BitSet {}
+impl<T: UnsignedInteger> Eq for GenericBitSet<T> {}
 
 #[cfg(test)]
 mod tests {
-    use super::{BitSet, BitSetAnd, BitSetLike, BitSetNot};
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
+
+    use super::{BitSetAnd, BitSetLike, BitSetNot, GenericBitSet, UnsignedInteger};
 
-    #[test]
-    fn insert() {
-        let mut c = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn insert<T: UnsignedInteger>() {
+        let mut c = GenericBitSet::<T>::new();
         for i in 0..1_000 {
             assert!(!c.add(i));
             assert!(c.add(i));
         }
 
         for i in 0..1_000 {
             assert!(c.contains(i));
         }
     }
 
@@ -622,9 +667,9 @@ mod tests {
-    #[test]
-    fn insert_100k() {
-        let mut c = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn insert_100k<T: UnsignedInteger>() {
+        let mut c = GenericBitSet::<T>::new();
         for i in 0..100_000 {
             assert!(!c.add(i));
             assert!(c.add(i));
@@ -634,9 +679,10 @@ mod tests {
         for i in 0..100_000 {
             assert!(c.contains(i));
         }
     }
-    #[test]
-    fn remove() {
-        let mut c = BitSet::new();
+
+    #[test_with(u32, u64, usize)]
+    fn remove<T: UnsignedInteger>() {
+        let mut c = GenericBitSet::<T>::new();
         for i in 0..1_000 {
             assert!(!c.add(i));
         }
@@ -649,9 +695,9 @@ mod tests {
         }
     }
 
-    #[test]
-    fn iter() {
-        let mut c = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn iter<T: UnsignedInteger>() {
+        let mut c = GenericBitSet::<T>::new();
         for i in 0..100_000 {
             c.add(i);
         }
@@ -664,10 +710,10 @@ mod tests {
         assert_eq!(count, 100_000);
     }
 
-    #[test]
-    fn iter_odd_even() {
-        let mut odd = BitSet::new();
-        let mut even = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn iter_odd_even<T: UnsignedInteger>() {
+        let mut odd = GenericBitSet::<T>::new();
+        let mut even = GenericBitSet::<T>::new();
         for i in 0..100_000 {
             if i % 2 == 1 {
                 odd.add(i);
@@ -681,11 +727,11 @@ mod tests {
         assert_eq!(BitSetAnd(&odd, &even).iter().count(), 0);
     }
 
-    #[test]
-    fn iter_random_add() {
+    #[test_with(u32, u64, usize)]
+    fn iter_random_add<T: UnsignedInteger>() {
         use rand::prelude::*;
 
-        let mut set = BitSet::new();
+        let mut set = GenericBitSet::<T>::new();
         let mut rng = thread_rng();
         let limit = 1_048_576;
         let mut added = 0;
@@ -698,13 +744,13 @@ mod tests {
         assert_eq!(set.iter().count(), added as usize);
     }
 
-    #[test]
-    fn iter_clusters() {
-        let mut set = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn iter_clusters<T: UnsignedInteger>() {
+        let mut set = GenericBitSet::<T>::new();
         for x in 0..8 {
-            let x = (x * 3) << (::BITS * 2); // scale to the last slot
+            let x = (x * 3) << (T::LOG_BITS * 2); // scale to the last slot
             for y in 0..8 {
-                let y = (y * 3) << (::BITS);
+                let y = (y * 3) << (T::LOG_BITS);
                 for z in 0..8 {
                     let z = z * 2;
                     set.add(x + y + z);
@@ -714,9 +760,9 @@ mod tests {
         assert_eq!(set.iter().count(), 8usize.pow(3));
     }
 
-    #[test]
-    fn not() {
-        let mut c = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn not<T: UnsignedInteger>() {
+        let mut c = GenericBitSet::<T>::new();
         for i in 0..10_000 {
             if i % 2 == 1 {
                 c.add(i);
@@ -731,31 +777,34 @@ mod tests {
 
 #[cfg(all(test, feature = "parallel"))]
 mod test_parallel {
-    use super::{BitSet, BitSetAnd, BitSetLike};
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
+
+    use super::{BitSetAnd, BitSetLike, GenericBitSet, UnsignedInteger};
     use rayon::iter::ParallelIterator;
 
-    #[test]
-    fn par_iter_one() {
+    #[test_with(u32, u64, usize)]
+    fn par_iter_one<T: UnsignedInteger>() {
         let step = 5000;
         let tests = 1_048_576 / step;
         for n in 0..tests {
             let n = n * step;
-            let mut set = BitSet::new();
+            let mut set = GenericBitSet::<T>::new();
             set.add(n);
             assert_eq!(set.par_iter().count(), 1);
         }
-        let mut set = BitSet::new();
+        let mut set = GenericBitSet::<T>::new();
         set.add(1_048_576 - 1);
         assert_eq!(set.par_iter().count(), 1);
     }
 
-    #[test]
-    fn par_iter_random_add() {
+    #[test_with(u32, u64, usize)]
+    fn par_iter_random_add<T: UnsignedInteger>() {
         use rand::prelude::*;
         use std::collections::HashSet;
         use std::sync::{Arc, Mutex};
 
-        let mut set = BitSet::new();
+        let mut set = GenericBitSet::<T>::new();
         let mut check_set = HashSet::new();
         let mut rng = thread_rng();
         let limit = 1_048_576;
@@ -798,10 +847,10 @@ mod test_parallel {
         }
     }
 
-    #[test]
-    fn par_iter_odd_even() {
-        let mut odd = BitSet::new();
-        let mut even = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn par_iter_odd_even<T: UnsignedInteger>() {
+        let mut odd = GenericBitSet::<T>::new();
+        let mut even = GenericBitSet::<T>::new();
         for i in 0..100_000 {
             if i % 2 == 1 {
                 odd.add(i);
@@ -815,16 +864,16 @@ mod test_parallel {
         assert_eq!(BitSetAnd(&odd, &even).par_iter().count(), 0);
     }
 
-    #[test]
-    fn par_iter_clusters() {
+    #[test_with(u32, u64, usize)]
+    fn par_iter_clusters<T: UnsignedInteger>() {
         use std::collections::HashSet;
         use std::sync::{Arc, Mutex};
-        let mut set = BitSet::new();
+        let mut set = GenericBitSet::<T>::new();
         let mut check_set = HashSet::new();
         for x in 0..8 {
-            let x = (x * 3) << (::BITS * 2); // scale to the last slot
+            let x = (x * 3) << (T::LOG_BITS * 2); // scale to the last slot
             for y in 0..8 {
-                let y = (y * 3) << (::BITS);
+                let y = (y * 3) << (T::LOG_BITS);
                 for z in 0..8 {
                     let z = z * 2;
                     let index = x + y + z;
diff --git a/src/ops.rs b/src/ops.rs
index 262b621..e5475d3 100644
--- a/src/ops.rs
+++ b/src/ops.rs
@@ -1,30 +1,33 @@
 use std::iter::{FromIterator, IntoIterator};
+use std::marker::PhantomData;
 use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Not};
 use std::usize;
 
 use util::*;
 
-use {AtomicBitSet, BitIter, BitSet, BitSetLike, DrainableBitSet};
+use {AtomicBitSet, BitIter, BitSetLike, DrainableBitSet, GenericBitSet};
 
-impl<'a, B> BitOrAssign<&'a B> for BitSet
+impl<'a, B, T> BitOrAssign<&'a B> for GenericBitSet<T>
 where
-    B: BitSetLike,
+    T: UnsignedInteger,
+    B: BitSetLike<Underlying = T>,
 {
     fn bitor_assign(&mut self, lhs: &B) {
         use iter::State::Continue;
         let mut iter = lhs.iter();
         while let Some(level) = (1..LAYERS).find(|&level| iter.handle_level(level) == Continue) {
             let lower = level - 1;
-            let idx = iter.prefix[lower] as usize >> BITS;
+            let idx = iter.prefix[lower] as usize >> T::LOG_BITS;
             *self.layer_mut(lower, idx) |= lhs.get_from_layer(lower, idx);
         }
         self.layer3 |= lhs.layer3();
     }
 }
 
-impl<'a, B> BitAndAssign<&'a B> for BitSet
+impl<'a, B, T> BitAndAssign<&'a B> for GenericBitSet<T>
 where
-    B: BitSetLike,
+    T: UnsignedInteger,
+    B: BitSetLike<Underlying = T>,
 {
     fn bitand_assign(&mut self, lhs: &B) {
         use iter::State::*;
@@ -32,19 +35,19 @@ where
         iter.masks[LAYERS - 1] &= self.layer3();
         while let Some(level) = (1..LAYERS).find(|&level| iter.handle_level(level) == Continue) {
             let lower = level - 1;
-            let idx = iter.prefix[lower] as usize >> BITS;
+            let idx = iter.prefix[lower] as usize >> T::LOG_BITS;
             let our_layer = self.get_from_layer(lower, idx);
             let their_layer = lhs.get_from_layer(lower, idx);
 
             iter.masks[lower] &= our_layer;
 
-            let mut masks = [0; LAYERS];
+            let mut masks = [T::ZERO; LAYERS];
             masks[lower] = our_layer & !their_layer;
             BitIter::new(&mut *self, masks, iter.prefix).clear();
 
             *self.layer_mut(lower, idx) &= their_layer;
         }
-        let mut masks = [0; LAYERS];
+        let mut masks = [T::ZERO; LAYERS];
         masks[LAYERS - 1] = self.layer3() & !lhs.layer3();
         BitIter::new(&mut *self, masks, [0; LAYERS - 1]).clear();
 
@@ -52,35 +55,36 @@ where
     }
 }
 
-impl<'a, B> BitXorAssign<&'a B> for BitSet
+impl<'a, B, T> BitXorAssign<&'a B> for GenericBitSet<T>
 where
-    B: BitSetLike,
+    T: UnsignedInteger,
+    B: BitSetLike<Underlying = T>,
 {
     fn bitxor_assign(&mut self, lhs: &B) {
         use iter::State::*;
         let mut iter = lhs.iter();
         while let Some(level) = (1..LAYERS).find(|&level| iter.handle_level(level) == Continue) {
             let lower = level - 1;
-            let idx = iter.prefix[lower] as usize >> BITS;
+            let idx = iter.prefix[lower] as usize >> T::LOG_BITS;
 
             if lower == 0 {
                 *self.layer_mut(lower, idx) ^= lhs.get_from_layer(lower, idx);
 
                 let mut change_bit = |level| {
                     let lower = level - 1;
-                    let h = iter.prefix.get(level).cloned().unwrap_or(0) as usize;
-                    let l = iter.prefix[lower] as usize >> BITS;
-                    let mask = 1 << (l & !h);
+                    let h = iter.prefix.get(level).cloned().unwrap_or(0);
+                    let l = iter.prefix[lower] >> T::LOG_BITS;
+                    let mask = T::ONE << T::from_u32(l & !h);
 
-                    if self.get_from_layer(lower, l) == 0 {
-                        *self.layer_mut(level, h >> BITS) &= !mask;
+                    if self.get_from_layer(lower, l as usize) == T::ZERO {
+                        *self.layer_mut(level, h as usize >> T::LOG_BITS) &= !mask;
                     } else {
-                        *self.layer_mut(level, h >> BITS) |= mask;
+                        *self.layer_mut(level, h as usize >> T::LOG_BITS) |= mask;
                     }
                 };
 
                 change_bit(level);
-                if iter.masks[level] == 0 {
+                if iter.masks[level] == T::ZERO {
                     (2..LAYERS).for_each(change_bit);
                 }
             }
@@ -94,23 +98,32 @@ where
 ///
 /// [`BitSetLike`]: ../trait.BitSetLike.html
 #[derive(Debug, Clone)]
-pub struct BitSetAnd<A: BitSetLike, B: BitSetLike>(pub A, pub B);
+pub struct BitSetAnd<A, B>(pub A, pub B)
+where
+    A: BitSetLike,
+    B: BitSetLike;
+
+impl<A, B> BitSetLike for BitSetAnd<A, B>
+where
+    A: BitSetLike,
+    B: BitSetLike<Underlying = A::Underlying>,
+{
+    type Underlying = A::Underlying;
 
-impl<A: BitSetLike, B: BitSetLike> BitSetLike for BitSetAnd<A, B> {
     #[inline]
-    fn layer3(&self) -> usize {
+    fn layer3(&self) -> Self::Underlying {
         self.0.layer3() & self.1.layer3()
     }
     #[inline]
-    fn layer2(&self, i: usize) -> usize {
+    fn layer2(&self, i: usize) -> Self::Underlying {
         self.0.layer2(i) & self.1.layer2(i)
     }
     #[inline]
-    fn layer1(&self, i: usize) -> usize {
+    fn layer1(&self, i: usize) -> Self::Underlying {
         self.0.layer1(i) & self.1.layer1(i)
     }
     #[inline]
-    fn layer0(&self, i: usize) -> usize {
+    fn layer0(&self, i: usize) -> Self::Underlying {
         self.0.layer0(i) & self.1.layer0(i)
     }
     #[inline]
@@ -119,7 +132,11 @@ impl<A: BitSetLike, B: BitSetLike> BitSetLike for BitSetAnd<A, B> {
     }
 }
 
-impl<A: DrainableBitSet, B: DrainableBitSet> DrainableBitSet for BitSetAnd<A, B> {
+impl<A, B> DrainableBitSet for BitSetAnd<A, B>
+where
+    A: DrainableBitSet,
+    B: DrainableBitSet<Underlying = A::Underlying>,
+{
     #[inline]
     fn remove(&mut self, i: Index) -> bool {
         if self.contains(i) {
@@ -138,23 +155,32 @@ impl<A: DrainableBitSet, B: DrainableBitSet> DrainableBitSet for BitSetAnd<A, B>
 ///
 /// [`BitSetLike`]: ../trait.BitSetLike.html
 #[derive(Debug, Clone)]
-pub struct BitSetOr<A: BitSetLike, B: BitSetLike>(pub A, pub B);
+pub struct BitSetOr<A, B>(pub A, pub B)
+where
+    A: BitSetLike,
+    B: BitSetLike;
+
+impl<A, B> BitSetLike for BitSetOr<A, B>
+where
+    A: BitSetLike,
+    B: BitSetLike<Underlying = A::Underlying>,
+{
+    type Underlying = A::Underlying;
 
-impl<A: BitSetLike, B: BitSetLike> BitSetLike for BitSetOr<A, B> {
     #[inline]
-    fn layer3(&self) -> usize {
+    fn layer3(&self) -> Self::Underlying {
         self.0.layer3() | self.1.layer3()
     }
     #[inline]
-    fn layer2(&self, i: usize) -> usize {
+    fn layer2(&self, i: usize) -> Self::Underlying {
         self.0.layer2(i) | self.1.layer2(i)
     }
     #[inline]
-    fn layer1(&self, i: usize) -> usize {
+    fn layer1(&self, i: usize) -> Self::Underlying {
         self.0.layer1(i) | self.1.layer1(i)
     }
     #[inline]
-    fn layer0(&self, i: usize) -> usize {
+    fn layer0(&self, i: usize) -> Self::Underlying {
         self.0.layer0(i) | self.1.layer0(i)
     }
     #[inline]
@@ -163,7 +189,11 @@ impl<A: BitSetLike, B: BitSetLike> BitSetLike for BitSetOr<A, B> {
     }
 }
 
-impl<A: DrainableBitSet, B: DrainableBitSet> DrainableBitSet for BitSetOr<A, B> {
+impl<A, B> DrainableBitSet for BitSetOr<A, B>
+where
+    A: DrainableBitSet,
+    B: DrainableBitSet<Underlying = A::Underlying>,
+{
     #[inline]
     fn remove(&mut self, i: Index) -> bool {
         if self.contains(i) {
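These adapters combine layer words lazily, per call, while iterating, rather than materializing a new set. A hedged usage sketch of the operator sugar over them (the `BitSet` alias and operator impls follow this diff; the data is illustrative):

```rust
extern crate hibitset;

use hibitset::{BitSet, BitSetLike};

fn main() {
    let a: BitSet = [1u32, 2, 3].iter().collect();
    let b: BitSet = [2u32, 3, 4].iter().collect();

    // `&a & &b` just wraps the operands in `BitSetAnd`; layer words are
    // intersected on the fly as the iterator descends.
    let and = &a & &b;
    assert_eq!(and.iter().collect::<Vec<_>>(), vec![2, 3]);

    let or = &a | &b;
    assert_eq!(or.iter().count(), 4);
}
```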
@@ -181,23 +211,30 @@ impl DrainableBitSet for BitSetOr
 ///
 /// [`BitSetLike`]: ../trait.BitSetLike.html
 #[derive(Debug, Clone)]
-pub struct BitSetNot<A: BitSetLike>(pub A);
+pub struct BitSetNot<A>(pub A)
+where
+    A: BitSetLike;
+
+impl<A> BitSetLike for BitSetNot<A>
+where
+    A: BitSetLike,
+{
+    type Underlying = A::Underlying;
 
-impl<A: BitSetLike> BitSetLike for BitSetNot<A> {
     #[inline]
-    fn layer3(&self) -> usize {
-        !0
+    fn layer3(&self) -> A::Underlying {
+        !A::Underlying::ZERO
     }
     #[inline]
-    fn layer2(&self, _: usize) -> usize {
-        !0
+    fn layer2(&self, _: usize) -> A::Underlying {
+        !A::Underlying::ZERO
     }
     #[inline]
-    fn layer1(&self, _: usize) -> usize {
-        !0
+    fn layer1(&self, _: usize) -> A::Underlying {
+        !A::Underlying::ZERO
     }
     #[inline]
-    fn layer0(&self, i: usize) -> usize {
+    fn layer0(&self, i: usize) -> A::Underlying {
         !self.0.layer0(i)
     }
     #[inline]
@@ -212,11 +249,20 @@ impl<A: BitSetLike> BitSetLike for BitSetNot<A> {
 ///
 /// [`BitSetLike`]: ../trait.BitSetLike.html
 #[derive(Debug, Clone)]
-pub struct BitSetXor<A: BitSetLike, B: BitSetLike>(pub A, pub B);
+pub struct BitSetXor<A, B>(pub A, pub B)
+where
+    A: BitSetLike,
+    B: BitSetLike;
+
+impl<A, B> BitSetLike for BitSetXor<A, B>
+where
+    A: BitSetLike,
+    B: BitSetLike<Underlying = A::Underlying>,
+{
+    type Underlying = A::Underlying;
 
-impl<A: BitSetLike, B: BitSetLike> BitSetLike for BitSetXor<A, B> {
     #[inline]
-    fn layer3(&self) -> usize {
+    fn layer3(&self) -> Self::Underlying {
         let xor = BitSetAnd(
             BitSetOr(&self.0, &self.1),
             BitSetNot(BitSetAnd(&self.0, &self.1)),
@@ -224,7 +270,7 @@ impl BitSetLike for BitSetXor {
         xor.layer3()
     }
     #[inline]
-    fn layer2(&self, id: usize) -> usize {
+    fn layer2(&self, id: usize) -> Self::Underlying {
         let xor = BitSetAnd(
             BitSetOr(&self.0, &self.1),
             BitSetNot(BitSetAnd(&self.0, &self.1)),
@@ -232,7 +278,7 @@ impl BitSetLike for BitSetXor {
         xor.layer2(id)
     }
     #[inline]
-    fn layer1(&self, id: usize) -> usize {
+    fn layer1(&self, id: usize) -> Self::Underlying {
         let xor = BitSetAnd(
             BitSetOr(&self.0, &self.1),
             BitSetNot(BitSetAnd(&self.0, &self.1)),
@@ -240,7 +286,7 @@ impl BitSetLike for BitSetXor {
         xor.layer1(id)
     }
     #[inline]
-    fn layer0(&self, id: usize) -> usize {
+    fn layer0(&self, id: usize) -> Self::Underlying {
         let xor = BitSetAnd(
             BitSetOr(&self.0, &self.1),
             BitSetNot(BitSetAnd(&self.0, &self.1)),
@@ -260,23 +306,27 @@ impl BitSetLike for BitSetXor {
 /// `BitSetAll` is a bitset with all bits set. Essentially the same as
 /// `BitSetNot(BitSet::new())` but without any allocation.
 #[derive(Debug, Clone)]
-pub struct BitSetAll;
-impl BitSetLike for BitSetAll {
+pub struct BitSetAll<T: UnsignedInteger> {
+    _phantom: PhantomData<T>,
+}
+impl<T: UnsignedInteger> BitSetLike for BitSetAll<T> {
+    type Underlying = T;
+
     #[inline]
-    fn layer3(&self) -> usize {
-        usize::MAX
+    fn layer3(&self) -> Self::Underlying {
+        T::MAX
     }
     #[inline]
-    fn layer2(&self, _id: usize) -> usize {
-        usize::MAX
+    fn layer2(&self, _id: usize) -> Self::Underlying {
+        T::MAX
    }
     #[inline]
-    fn layer1(&self, _id: usize) -> usize {
-        usize::MAX
+    fn layer1(&self, _id: usize) -> Self::Underlying {
+        T::MAX
     }
     #[inline]
-    fn layer0(&self, _id: usize) -> usize {
-        usize::MAX
+    fn layer0(&self, _id: usize) -> Self::Underlying {
+        T::MAX
     }
     #[inline]
     fn contains(&self, _i: Index) -> bool {
@@ -286,8 +336,10 @@ impl BitSetLike for BitSetAll {
 
 macro_rules! operator {
     ( impl < ( $( $lifetime:tt )* ) ( $( $arg:ident ),* ) > for $bitset:ty ) => {
-        impl<$( $lifetime, )* $( $arg ),*> IntoIterator for $bitset
-            where $( $arg: BitSetLike ),*
+        impl<$( $lifetime, )* T, $( $arg ),*> IntoIterator for $bitset
+        where
+            T: UnsignedInteger,
+            $( $arg: BitSetLike<Underlying = T> ),*
         {
             type Item = <BitIter<Self> as Iterator>::Item;
             type IntoIter = BitIter<Self>;
@@ -296,8 +348,10 @@ macro_rules! operator {
             }
         }
 
-        impl<$( $lifetime, )* $( $arg ),*> Not for $bitset
-            where $( $arg: BitSetLike ),*
+        impl<$( $lifetime, )* T, $( $arg ),*> Not for $bitset
+        where
+            T: UnsignedInteger,
+            $( $arg: BitSetLike<Underlying = T> ),*
         {
             type Output = BitSetNot<Self>;
             fn not(self) -> Self::Output {
@@ -305,32 +359,38 @@ macro_rules! operator {
             }
         }
 
-        impl<$( $lifetime, )* $( $arg, )* T> BitAnd<T> for $bitset
-            where T: BitSetLike,
-                  $( $arg: BitSetLike ),*
+        impl<$( $lifetime, )* T, $( $arg, )* OtherBitSetLike> BitAnd<OtherBitSetLike> for $bitset
+        where
+            T: UnsignedInteger,
+            OtherBitSetLike: BitSetLike<Underlying = T>,
+            $( $arg: BitSetLike<Underlying = T> ),*
         {
-            type Output = BitSetAnd<Self, T>;
-            fn bitand(self, rhs: T) -> Self::Output {
+            type Output = BitSetAnd<Self, OtherBitSetLike>;
+            fn bitand(self, rhs: OtherBitSetLike) -> Self::Output {
                 BitSetAnd(self, rhs)
             }
        }
 
-        impl<$( $lifetime, )* $( $arg, )* T> BitOr<T> for $bitset
-            where T: BitSetLike,
-                  $( $arg: BitSetLike ),*
+        impl<$( $lifetime, )* T, $( $arg, )* OtherBitSetLike> BitOr<OtherBitSetLike> for $bitset
+        where
+            T: UnsignedInteger,
+            OtherBitSetLike: BitSetLike<Underlying = T>,
+            $( $arg: BitSetLike<Underlying = T> ),*
         {
-            type Output = BitSetOr<Self, T>;
-            fn bitor(self, rhs: T) -> Self::Output {
+            type Output = BitSetOr<Self, OtherBitSetLike>;
+            fn bitor(self, rhs: OtherBitSetLike) -> Self::Output {
                 BitSetOr(self, rhs)
             }
         }
 
-        impl<$( $lifetime, )* $( $arg, )* T> BitXor<T> for $bitset
-            where T: BitSetLike,
-                  $( $arg: BitSetLike ),*
+        impl<$( $lifetime, )* T, $( $arg, )* OtherBitSetLike> BitXor<OtherBitSetLike> for $bitset
+        where
+            T: UnsignedInteger,
+            OtherBitSetLike: BitSetLike<Underlying = T>,
+            $( $arg: BitSetLike<Underlying = T> ),*
         {
-            type Output = BitSetXor<Self, T>;
-            fn bitxor(self, rhs: T) -> Self::Output {
+            type Output = BitSetXor<Self, OtherBitSetLike>;
+            fn bitxor(self, rhs: OtherBitSetLike) -> Self::Output {
                 BitSetXor(self, rhs)
             }
         }
@@ -338,10 +398,8 @@ macro_rules! operator {
     }
 }
 
-operator!(impl<()()> for BitSet);
-operator!(impl<('a)()> for &'a BitSet);
-operator!(impl<()()> for AtomicBitSet);
-operator!(impl<('a)()> for &'a AtomicBitSet);
+operator!(impl<()()> for GenericBitSet<T>);
+operator!(impl<('a)()> for &'a GenericBitSet<T>);
 operator!(impl<()(A)> for BitSetNot<A>);
 operator!(impl<('a)(A)> for &'a BitSetNot<A>);
 operator!(impl<()(A, B)> for BitSetAnd<A, B>);
 operator!(impl<('a)(A, B)> for &'a BitSetAnd<A, B>);
 operator!(impl<()(A, B)> for BitSetOr<A, B>);
@@ -350,80 +408,203 @@ operator!(impl<('a)(A, B)> for &'a BitSetOr<A, B>);
 operator!(impl<()(A, B)> for BitSetXor<A, B>);
 operator!(impl<('a)(A, B)> for &'a BitSetXor<A, B>);
-operator!(impl<()()> for BitSetAll);
-operator!(impl<('a)()> for &'a BitSetAll);
-
-macro_rules! iterator {
-    ( $bitset:ident ) => {
-        impl FromIterator<Index> for $bitset {
-            fn from_iter<T>(iter: T) -> Self
-            where
-                T: IntoIterator<Item = Index>,
-            {
-                let mut bitset = $bitset::new();
-                for item in iter {
-                    bitset.add(item);
-                }
-                bitset
-            }
+operator!(impl<()()> for BitSetAll<T>);
+operator!(impl<('a)()> for &'a BitSetAll<T>);
+
+impl<T: UnsignedInteger> FromIterator<Index> for GenericBitSet<T> {
+    fn from_iter<I>(iter: I) -> Self
+    where
+        I: IntoIterator<Item = Index>,
+    {
+        let mut bitset = Self::new();
+        for item in iter {
+            bitset.add(item);
         }
+        bitset
+    }
+}
 
-        impl<'a> FromIterator<&'a Index> for $bitset {
-            fn from_iter<T>(iter: T) -> Self
-            where
-                T: IntoIterator<Item = &'a Index>,
-            {
-                let mut bitset = $bitset::new();
-                for item in iter {
-                    bitset.add(*item);
-                }
-                bitset
-            }
+impl<'a, T: UnsignedInteger> FromIterator<&'a Index> for GenericBitSet<T> {
+    fn from_iter<I>(iter: I) -> Self
+    where
+        I: IntoIterator<Item = &'a Index>,
+    {
+        let mut bitset = Self::new();
+        for item in iter {
+            bitset.add(*item);
         }
+        bitset
+    }
+}
 
-        impl Extend<Index> for $bitset {
-            fn extend<T>(&mut self, iter: T)
-            where
-                T: IntoIterator<Item = Index>,
-            {
-                for item in iter {
-                    self.add(item);
-                }
-            }
+impl<T: UnsignedInteger> Extend<Index> for GenericBitSet<T> {
+    fn extend<I>(&mut self, iter: I)
+    where
+        I: IntoIterator<Item = Index>,
+    {
+        for item in iter {
+            self.add(item);
         }
+    }
+}
 
-        impl<'a> Extend<&'a Index> for $bitset {
-            fn extend<T>(&mut self, iter: T)
-            where
-                T: IntoIterator<Item = &'a Index>,
-            {
-                for item in iter {
-                    self.add(*item);
-                }
-            }
+impl<'a, T: UnsignedInteger> Extend<&'a Index> for GenericBitSet<T> {
+    fn extend<I>(&mut self, iter: I)
+    where
+        I: IntoIterator<Item = &'a Index>,
+    {
+        for item in iter {
+            self.add(*item);
         }
-    };
+    }
 }
 
-iterator!(BitSet);
-iterator!(AtomicBitSet);
+// All specialized implementations for `AtomicBitSet`
+
+impl FromIterator<Index> for AtomicBitSet {
+    fn from_iter<T>(iter: T) -> Self
+    where
+        T: IntoIterator<Item = Index>,
+    {
+        let mut bitset = AtomicBitSet::new();
+        for item in iter {
+            bitset.add(item);
+        }
+        bitset
+    }
+}
+impl<'a> FromIterator<&'a Index> for AtomicBitSet {
+    fn from_iter<T>(iter: T) -> Self
+    where
+        T: IntoIterator<Item = &'a Index>,
+    {
+        let mut bitset = AtomicBitSet::new();
+        for item in iter {
+            bitset.add(*item);
+        }
+        bitset
+    }
+}
+impl Extend<Index> for AtomicBitSet {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = Index>,
+    {
+        for item in iter {
+            self.add(item);
+        }
+    }
+}
+impl<'a> Extend<&'a Index> for AtomicBitSet {
+    fn extend<T>(&mut self, iter: T)
+    where
+        T: IntoIterator<Item = &'a Index>,
+    {
+        for item in iter {
+            self.add(*item);
+        }
+    }
+}
+impl IntoIterator for AtomicBitSet {
+    type Item = <BitIter<Self> as Iterator>::Item;
+    type IntoIter = BitIter<Self>;
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+impl<'a> IntoIterator for &'a AtomicBitSet {
+    type Item = <BitIter<Self> as Iterator>::Item;
+    type IntoIter = BitIter<Self>;
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+impl Not for AtomicBitSet {
+    type Output = BitSetNot<Self>;
+    fn not(self) -> Self::Output {
+        BitSetNot(self)
+    }
+}
+impl<'a> Not for &'a AtomicBitSet {
+    type Output = BitSetNot<Self>;
+    fn not(self) -> Self::Output {
+        BitSetNot(self)
+    }
+}
+impl<OtherBitSetLike> BitAnd<OtherBitSetLike> for AtomicBitSet
+where
+    OtherBitSetLike: BitSetLike,
+{
+    type Output = BitSetAnd<Self, OtherBitSetLike>;
+    fn bitand(self, rhs: OtherBitSetLike) -> Self::Output {
+        BitSetAnd(self, rhs)
+    }
+}
+impl<'a, OtherBitSetLike> BitAnd<OtherBitSetLike> for &'a AtomicBitSet
+where
+    OtherBitSetLike: BitSetLike,
+{
+    type Output = BitSetAnd<Self, OtherBitSetLike>;
+    fn bitand(self, rhs: OtherBitSetLike) -> Self::Output {
+        BitSetAnd(self, rhs)
+    }
+}
+impl<OtherBitSetLike> BitOr<OtherBitSetLike> for AtomicBitSet
+where
+    OtherBitSetLike: BitSetLike,
+{
+    type Output = BitSetOr<Self, OtherBitSetLike>;
+    fn bitor(self, rhs: OtherBitSetLike) -> Self::Output {
+        BitSetOr(self, rhs)
+    }
+}
+impl<'a, OtherBitSetLike> BitOr<OtherBitSetLike> for &'a AtomicBitSet
+where
+    OtherBitSetLike: BitSetLike,
+{
+    type Output = BitSetOr<Self, OtherBitSetLike>;
+    fn bitor(self, rhs: OtherBitSetLike) -> Self::Output {
+        BitSetOr(self, rhs)
+    }
+}
+impl<OtherBitSetLike> BitXor<OtherBitSetLike> for AtomicBitSet
+where
+    OtherBitSetLike: BitSetLike,
+{
+    type Output = BitSetXor<Self, OtherBitSetLike>;
+    fn bitxor(self, rhs: OtherBitSetLike) -> Self::Output {
+        BitSetXor(self, rhs)
+    }
+}
+impl<'a, OtherBitSetLike> BitXor<OtherBitSetLike> for &'a AtomicBitSet
+where
+    OtherBitSetLike: BitSetLike,
+{
+    type Output = BitSetXor<Self, OtherBitSetLike>;
+    fn bitxor(self, rhs: OtherBitSetLike) -> Self::Output {
+        BitSetXor(self, rhs)
+    }
+}
 
 #[cfg(test)]
 mod tests {
-    use {BitSet, BitSetLike, BitSetXor, Index};
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
+
+    use {BitSetLike, BitSetXor, GenericBitSet, Index, UnsignedInteger};
 
-    #[test]
-    fn or_assign() {
+    #[test_with(u32, u64, usize)]
+    fn or_assign<T: UnsignedInteger>() {
         use std::collections::HashSet;
         use std::mem::size_of;
 
-        let usize_bits = size_of::<usize>() as u32 * 8;
+        let uint_bits = size_of::<T>() as u32 * 8;
         let n = 10_000;
-        let f1 = &|n| 7 * usize_bits * n;
-        let f2 = &|n| 13 * usize_bits * n;
+        let f1 = &|i| (7 * uint_bits * i) % T::MAX_EID;
+        let f2 = &|i| (13 * uint_bits * i) % T::MAX_EID;
 
-        let mut c1: BitSet = (0..n).map(f1).collect();
-        let c2: BitSet = (0..n).map(f2).collect();
+        let mut c1: GenericBitSet<T> = (0..n).map(f1).collect();
+        let c2: GenericBitSet<T> = (0..n).map(f2).collect();
 
         c1 |= &c2;
 
@@ -432,15 +613,15 @@ mod tests {
         assert_eq!(c1.iter().collect::<HashSet<_>>(), &h1 | &h2);
     }
 
-    #[test]
-    fn or_assign_random() {
+    #[test_with(u32, u64, usize)]
+    fn or_assign_random<T: UnsignedInteger>() {
         use rand::prelude::*;
         use std::collections::HashSet;
 
         let limit = 1_048_576;
         let mut rng = thread_rng();
 
-        let mut set1 = BitSet::new();
+        let mut set1 = GenericBitSet::<T>::new();
         let mut check_set1 = HashSet::new();
         for _ in 0..(limit / 100) {
             let index = rng.gen_range(0, limit);
@@ -448,7 +629,7 @@ mod tests {
             check_set1.insert(index);
         }
 
-        let mut set2 = BitSet::new();
+        let mut set2 = GenericBitSet::<T>::new();
         let mut check_set2 = HashSet::new();
         for _ in 0..(limit / 100) {
             let index = rng.gen_range(0, limit);
@@ -471,18 +652,18 @@ mod tests {
         assert_eq!(hs, set1.iter().collect());
     }
 
-    #[test]
-    fn and_assign() {
+    #[test_with(u32, u64, usize)]
+    fn and_assign<T: UnsignedInteger>() {
         use std::collections::HashSet;
         use std::mem::size_of;
 
-        let usize_bits = size_of::<usize>() as u32 * 8;
+        let uint_bits = size_of::<T>() as u32 * 8;
         let n = 10_000;
-        let f1 = &|n| 7 * usize_bits * n;
-        let f2 = &|n| 13 * usize_bits * n;
+        let f1 = &|n| (7 * uint_bits * n) % T::MAX_EID;
+        let f2 = &|n| (13 * uint_bits * n) % T::MAX_EID;
 
-        let mut c1: BitSet = (0..n).map(f1).collect();
-        let c2: BitSet = (0..n).map(f2).collect();
+        let mut c1: GenericBitSet<T> = (0..n).map(f1).collect();
+        let c2: GenericBitSet<T> = (0..n).map(f2).collect();
 
         c1 &= &c2;
 
@@ -491,53 +672,49 @@ mod tests {
         assert_eq!(c1.iter().collect::<HashSet<_>>(), &h1 & &h2);
     }
 
-    #[test]
-    fn and_assign_specific() {
-        use util::BITS;
-
-        let mut c1 = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn and_assign_specific<T: UnsignedInteger>() {
+        let mut c1 = GenericBitSet::<T>::new();
         c1.add(0);
-        let common = ((1 << BITS) << BITS) << BITS;
+        let common = ((1 << T::LOG_BITS) << T::LOG_BITS) << T::LOG_BITS;
         c1.add(common);
-        c1.add((((1 << BITS) << BITS) + 1) << BITS);
+        c1.add((((1 << T::LOG_BITS) << T::LOG_BITS) + 1) << T::LOG_BITS);
 
-        let mut c2: BitSet = BitSet::new();
+        let mut c2 = GenericBitSet::<T>::new();
         c2.add(common);
-        c2.add((((1 << BITS) << BITS) + 2) << BITS);
+        c2.add((((1 << T::LOG_BITS) << T::LOG_BITS) + 2) << T::LOG_BITS);
 
         c1 &= &c2;
 
         assert_eq!(c1.iter().collect::<Vec<_>>(), [common]);
     }
 
-    #[test]
-    fn and_assign_with_modification() {
-        use util::BITS;
-
-        let mut c1 = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn and_assign_with_modification<T: UnsignedInteger>() {
+        let mut c1 = GenericBitSet::<T>::new();
         c1.add(0);
-        c1.add((1 << BITS) << BITS);
+        c1.add((1 << T::LOG_BITS) << T::LOG_BITS);
 
-        let mut c2: BitSet = BitSet::new();
+        let mut c2 = GenericBitSet::<T>::new();
         c2.add(0);
 
         c1 &= &c2;
 
-        let added = ((1 << BITS) + 1) << BITS;
+        let added = ((1 << T::LOG_BITS) + 1) << T::LOG_BITS;
         c1.add(added);
 
         assert_eq!(c1.iter().collect::<Vec<_>>(), [0, added]);
     }
 
-    #[test]
-    fn and_assign_random() {
+    #[test_with(u32, u64, usize)]
+    fn and_assign_random<T: UnsignedInteger>() {
        use rand::prelude::*;
         use std::collections::HashSet;
 
         let limit = 1_048_576;
         let mut rng = thread_rng();
 
-        let mut set1 = BitSet::new();
+        let mut set1 = GenericBitSet::<T>::new();
         let mut check_set1 = HashSet::new();
         for _ in 0..(limit / 100) {
             let index = rng.gen_range(0, limit);
@@ -545,7 +722,7 @@ mod tests {
             check_set1.insert(index);
         }
 
-        let mut set2 = BitSet::new();
+        let mut set2 = GenericBitSet::<T>::new();
         let mut check_set2 = HashSet::new();
         for _ in 0..(limit / 100) {
             let index = rng.gen_range(0, limit);
@@ -568,18 +745,18 @@ mod tests {
         assert_eq!(hs, set1.iter().collect());
     }
 
-    #[test]
-    fn xor_assign() {
+    #[test_with(u32, u64, usize)]
+    fn xor_assign<T: UnsignedInteger>() {
         use std::collections::HashSet;
         use std::mem::size_of;
 
-        let usize_bits = size_of::<usize>() as u32 * 8;
+        let uint_bits = size_of::<T>() as u32 * 8;
         let n = 10_000;
-        let f1 = &|n| 7 * usize_bits * n;
-        let f2 = &|n| 13 * usize_bits * n;
+        let f1 = &|n| (7 * uint_bits * n) % T::MAX_EID;
+        let f2 = &|n| (13 * uint_bits * n) % T::MAX_EID;
 
-        let mut c1: BitSet = (0..n).map(f1).collect();
-        let c2: BitSet = (0..n).map(f2).collect();
+        let mut c1: GenericBitSet<T> = (0..n).map(f1).collect();
+        let c2: GenericBitSet<T> = (0..n).map(f2).collect();
 
         c1 ^= &c2;
 
         let h1: HashSet<_> = (0..n).map(f1).collect();
@@ -587,20 +764,18 @@ mod tests {
         assert_eq!(c1.iter().collect::<HashSet<_>>(), &h1 ^ &h2);
     }
 
-    #[test]
-    fn xor_assign_specific() {
-        use util::BITS;
-
-        let mut c1 = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn xor_assign_specific<T: UnsignedInteger>() {
+        let mut c1 = GenericBitSet::<T>::new();
         c1.add(0);
-        let common = ((1 << BITS) << BITS) << BITS;
+        let common = ((1 << T::LOG_BITS) << T::LOG_BITS) << T::LOG_BITS;
         c1.add(common);
-        let a = (((1 << BITS) + 1) << BITS) << BITS;
+        let a = (((1 << T::LOG_BITS) + 1) << T::LOG_BITS) << T::LOG_BITS;
         c1.add(a);
 
-        let mut c2: BitSet = BitSet::new();
+        let mut c2 = GenericBitSet::<T>::new();
         c2.add(common);
-        let b = (((1 << BITS) + 2) << BITS) << BITS;
+        let b = (((1 << T::LOG_BITS) + 2) << T::LOG_BITS) << T::LOG_BITS;
         c2.add(b);
 
         c1 ^= &c2;
 
@@ -608,14 +783,14 @@ mod tests {
         assert_eq!(c1.iter().collect::<Vec<_>>(), [0, a, b]);
     }
 
-    #[test]
-    fn xor_assign_random() {
+    #[test_with(u32, u64, usize)]
+    fn xor_assign_random<T: UnsignedInteger>() {
         use rand::prelude::*;
         use std::collections::HashSet;
 
         let limit = 1_048_576;
         let mut rng = thread_rng();
 
-        let mut set1 = BitSet::new();
+        let mut set1 = GenericBitSet::<T>::new();
         let mut check_set1 = HashSet::new();
         for _ in 0..(limit / 100) {
             let index = rng.gen_range(0, limit);
@@ -623,7 +798,7 @@ mod tests {
             check_set1.insert(index);
         }
 
-        let mut set2 = BitSet::new();
+        let mut set2 = GenericBitSet::<T>::new();
         let mut check_set2 = HashSet::new();
         for _ in 0..(limit / 100) {
             let index = rng.gen_range(0, limit);
@@ -646,9 +821,9 @@ mod tests {
         assert_eq!(hs, set1.iter().collect());
     }
 
-    #[test]
-    fn operators() {
-        let mut bitset = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn operators<T: UnsignedInteger>() {
+        let mut bitset = GenericBitSet::<T>::new();
         bitset.add(1);
         bitset.add(3);
         bitset.add(5);
         bitset.add(15);
         bitset.add(200);
         bitset.add(50001);
 
-        let mut other = BitSet::new();
+        let mut other = GenericBitSet::<T>::new();
         other.add(1);
         other.add(3);
         other.add(50000);
@@ -695,16 +870,16 @@ mod tests {
         }
     }
 
-    #[test]
-    fn xor() {
+    #[test_with(u32, u64, usize)]
+    fn xor<T: UnsignedInteger>() {
         // 0011
-        let mut bitset = BitSet::new();
+        let mut bitset = GenericBitSet::<T>::new();
         bitset.add(2);
         bitset.add(3);
         bitset.add(50000);
 
         // 0101
-        let mut other = BitSet::new();
+        let mut other = GenericBitSet::<T>::new();
         other.add(1);
         other.add(3);
         other.add(50000);
diff --git a/src/util.rs b/src/util.rs
index b2b294d..93fb12b 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -1,44 +1,126 @@
+use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXorAssign, Div, Not, Shl, Shr, Sub};
+
 /// Type used for indexing.
 pub type Index = u32;
 
-/// Base two log of the number of bits in a usize.
-#[cfg(target_pointer_width = "64")]
-pub const BITS: usize = 6;
-#[cfg(target_pointer_width = "32")]
-pub const BITS: usize = 5;
+/// helper function to get the base 2 log of a const number
+const fn base_2_log<const N: usize>() -> usize {
+    match N {
+        32 => 5,
+        64 => 6,
+        _ => unimplemented!(),
+    }
+}
+
+/// Specifies the interface necessary for a `BitSet` to be built on top of `Self`
+pub trait UnsignedInteger:
+    Sized
+    + Clone
+    + Copy
+    + Default
+    + std::fmt::Debug
+    + PartialEq
+    + Not<Output = Self>
+    + BitAnd<Self, Output = Self>
+    + BitAndAssign
+    + BitOr<Self, Output = Self>
+    + BitOrAssign
+    + BitXorAssign
+    + Shl<Self, Output = Self>
+    + Shr<Self, Output = Self>
+    + Div<Self, Output = Self>
+    + Sub<Self, Output = Self>
+{
+    /// value of zero
+    const ZERO: Self;
+    /// value of one
+    const ONE: Self;
+    /// all ones
+    const MAX: Self;
+    /// Number of bits per integer
+    const BITS: usize;
+    /// Base two log of the number of bits.
+    const LOG_BITS: usize;
+    /// Maximum amount of bits per bitset.
+    const MAX_EID: u32 = (2 << (Self::LOG_BITS * LAYERS) - 1) as u32;
+    /// Layer0 shift (bottom layer, true bitset).
+    const SHIFT0: usize = 0;
+    /// Layer1 shift (third layer).
+    const SHIFT1: usize = Self::SHIFT0 + Self::LOG_BITS;
+    /// Layer2 shift (second layer).
+    const SHIFT2: usize = Self::SHIFT1 + Self::LOG_BITS;
+    /// Top layer shift.
+    const SHIFT3: usize = Self::SHIFT2 + Self::LOG_BITS;
+
+    /// conversion function from Index type
+    fn from_u32(val: u32) -> Self;
+    /// conversion function from u64
+    fn from_u64(val: u64) -> Self;
+    /// conversion to u32
+    fn to_u32(self) -> u32;
+    /// conversion to u64
+    fn to_u64(self) -> u64;
+    /// Returns the number of trailing zeros in the binary representation of self.
+    fn trailing_zeros(self) -> u32;
+}
+
+macro_rules! from_primitive_uint {
+    ($type:ident) => {
+        impl UnsignedInteger for $type {
+            const ZERO: Self = 0;
+            const ONE: Self = 1;
+            const MAX: Self = Self::MAX;
+            const BITS: usize = Self::BITS as usize;
+            const LOG_BITS: usize = base_2_log::<{ Self::BITS as usize }>();
+
+            #[inline(always)]
+            fn from_u32(val: u32) -> Self {
+                val as Self
+            }
+            #[inline(always)]
+            fn from_u64(val: u64) -> Self {
+                val as Self
+            }
+            #[inline(always)]
+            fn to_u32(self) -> u32 {
+                self as u32
+            }
+            #[inline(always)]
+            fn to_u64(self) -> u64 {
+                self as u64
+            }
+            #[inline(always)]
+            fn trailing_zeros(self) -> u32 {
+                self.trailing_zeros()
+            }
+        }
+    };
+}
+
+from_primitive_uint!(usize);
+from_primitive_uint!(u64);
+from_primitive_uint!(u32);
 
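The derived constants above do the heavy lifting: `LOG_BITS` comes from `base_2_log`, and `MAX_EID` works out to `BITS**4`. A hedged sanity-check sketch one could drop into `util.rs` (not part of the patch; the module name is invented):

```rust
#[cfg(test)]
mod log_bits_sanity {
    use super::UnsignedInteger;

    #[test]
    fn derived_constants() {
        assert_eq!(<u32 as UnsignedInteger>::LOG_BITS, 5);
        assert_eq!(<u64 as UnsignedInteger>::LOG_BITS, 6);
        // MAX_EID is BITS**4: (2^5)^4 and (2^6)^4 respectively.
        assert_eq!(<u32 as UnsignedInteger>::MAX_EID, 1 << 20);
        assert_eq!(<u64 as UnsignedInteger>::MAX_EID, 1 << 24);
    }
}
```

Supporting a further width (say `u16`) would only require an extra `16 => 4` arm in `base_2_log` plus a `from_primitive_uint!(u16);` invocation — a hypothetical extension, not something this patch does.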
 /// Amount of layers in the hierarchical bitset.
 pub const LAYERS: usize = 4;
-pub const MAX: usize = BITS * LAYERS;
-/// Maximum amount of bits per bitset.
-pub const MAX_EID: usize = 2 << MAX - 1;
-
-/// Layer0 shift (bottom layer, true bitset).
-pub const SHIFT0: usize = 0;
-/// Layer1 shift (third layer).
-pub const SHIFT1: usize = SHIFT0 + BITS;
-/// Layer2 shift (second layer).
-pub const SHIFT2: usize = SHIFT1 + BITS;
-/// Top layer shift.
-pub const SHIFT3: usize = SHIFT2 + BITS;
 
 pub trait Row: Sized + Copy {
     /// Location of the bit in the row.
-    fn row(self, shift: usize) -> usize;
+    fn row<T: UnsignedInteger>(self, shift: usize) -> T;
 
     /// Index of the row that the bit is in.
     fn offset(self, shift: usize) -> usize;
 
     /// Bitmask of the row the bit is in.
     #[inline(always)]
-    fn mask(self, shift: usize) -> usize {
-        1usize << self.row(shift)
+    fn mask<T: UnsignedInteger>(self, shift: usize) -> T {
+        T::ONE << self.row(shift)
     }
 }
 
 impl Row for Index {
     #[inline(always)]
-    fn row(self, shift: usize) -> usize {
-        ((self >> shift) as usize) & ((1 << BITS) - 1)
+    fn row<T: UnsignedInteger>(self, shift: usize) -> T {
+        T::from_u32(self >> shift) & T::from_u32((1 << T::LOG_BITS) - 1)
     }
 
     #[inline(always)]
@@ -51,8 +133,12 @@ impl Row for Index {
 ///
 /// Returns them in (Layer0, Layer1, Layer2) order.
 #[inline]
-pub fn offsets(bit: Index) -> (usize, usize, usize) {
-    (bit.offset(SHIFT1), bit.offset(SHIFT2), bit.offset(SHIFT3))
+pub fn offsets<T: UnsignedInteger>(bit: Index) -> (usize, usize, usize) {
+    (
+        bit.offset(T::SHIFT1),
+        bit.offset(T::SHIFT2),
+        bit.offset(T::SHIFT3),
+    )
 }
 
 /// Finds the highest bit that splits set bits of the `usize`
 /// to as equal halves as possible.
 // TODO: Can 64/32 bit variants be merged to one implementation?
 // Seems that this would need integer generics to do.
 #[cfg(feature = "parallel")]
-pub fn average_ones(n: usize) -> Option<usize> {
-    #[cfg(target_pointer_width = "64")]
-    let average = average_ones_u64(n as u64).map(|n| n as usize);
-
-    #[cfg(target_pointer_width = "32")]
-    let average = average_ones_u32(n as u32).map(|n| n as usize);
+pub fn average_ones<T: UnsignedInteger>(n: T) -> Option<T> {
+    let average = match T::BITS {
+        32 => average_ones_u32(n.to_u32()).map(T::from_u32),
+        64 => average_ones_u64(n.to_u64()).map(T::from_u64),
+        _ => unimplemented!(),
+    };
 
     average
 }
 
-#[cfg(all(any(test, target_pointer_width = "32"), feature = "parallel"))]
+#[cfg(feature = "parallel")]
 fn average_ones_u32(n: u32) -> Option<u32> {
     // !0 / ((1 << (1 << n)) | 1)
     const PAR: [u32; 5] = [!0 / 0x3, !0 / 0x5, !0 / 0x11, !0 / 0x101, !0 / 0x10001];
@@ -126,7 +212,7 @@ fn average_ones_u32(n: u32) -> Option<u32> {
     Some(result - 1)
 }
 
-#[cfg(all(any(test, target_pointer_width = "64"), feature = "parallel"))]
+#[cfg(feature = "parallel")]
 fn average_ones_u64(n: u64) -> Option<u64> {
     // !0 / ((1 << (1 << n)) | 1)
     const PAR: [u64; 6] = [
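For intuition about what `average_ones` computes, here is a naive reference implementation of its documented behaviour (assumed semantics: return a bit position that splits the word's set bits into near-equal halves, `None` when fewer than two bits are set). The real functions above use branchless popcount tricks instead; this sketch is only illustrative.

```rust
/// Naive reference for `average_ones` semantics: the returned position has
/// about half of the set bits strictly below it.
fn average_ones_naive(n: u64) -> Option<u64> {
    let total = n.count_ones();
    if total < 2 {
        return None;
    }
    let mut seen = 0u32;
    for bit in 0..64u64 {
        if n & (1u64 << bit) != 0 {
            seen += 1;
            if seen * 2 > total {
                return Some(bit);
            }
        }
    }
    None
}

fn main() {
    assert_eq!(average_ones_naive(0b1), None);
    let split = average_ones_naive(0b1010_1010).unwrap();
    // Two of the four set bits sit strictly below the returned position.
    assert_eq!((0b1010_1010u64 & ((1u64 << split) - 1)).count_ones(), 2);
}
```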