Commit a2c28c3

feat: generalize internal type used, to be able to force u32 or `u64` if needed
twiby committed Feb 3, 2024
1 parent 02d85de commit a2c28c3
Showing 8 changed files with 766 additions and 419 deletions.
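Note for reviewers: the hunks below lean on a new `UnsignedInteger` trait whose definition (presumably in src/util.rs) is among the files not loaded in this view. Below is a minimal sketch reconstructed from the visible call sites — the member names come from the diff, while the exact bounds are assumptions:

// Sketch only: reconstructed from usage such as `T::BITS`, `T::ZERO`,
// `T::ONE`, `T::from_u32(..)`, `usize::SHIFT0..SHIFT3`, and
// `masks[level].trailing_zeros()` in the hunks below. Bounds are abridged;
// the real trait likely also needs the assign variants (`&=`, `|=`).
use std::ops::{BitAnd, BitOr, Not, Shl};

pub trait UnsignedInteger:
    Copy
    + PartialEq
    + BitAnd<Output = Self>
    + BitOr<Output = Self>
    + Not<Output = Self>
    + Shl<Self, Output = Self>
{
    // log2 of the word width (5 for u32, 6 for u64) — inferred from how
    // prefixes are packed as `idx << BITS` and unpacked as `prefix >> BITS`.
    const BITS: u32;
    const ZERO: Self;
    const ONE: Self;
    // Per-layer shift amounts, spaced BITS apart (SHIFT1 = SHIFT0 + BITS, ...).
    const SHIFT0: usize;
    const SHIFT1: usize;
    const SHIFT2: usize;
    const SHIFT3: usize;

    fn from_u32(v: u32) -> Self;
    fn trailing_zeros(self) -> u32;
}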
1 change: 1 addition & 0 deletions Cargo.toml
@@ -15,6 +15,7 @@ optional = true

 [dev-dependencies]
 rand = "0.7"
+typed_test_gen = "0.1"
 
 [features]
 default = ["parallel"]
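The new dev-dependency is what drives the reworked tests later in this diff: typed_test_gen's `test_with` attribute turns a generic test function into one concrete #[test] per listed type. A minimal usage sketch (the trait bounds here are arbitrary, just to keep the example self-contained):

extern crate typed_test_gen;
use typed_test_gen::test_with;

// Generates one #[test] per listed type, substituting it for T.
#[test_with(u32, u64, usize)]
fn default_is_zero<T: Default + PartialEq + std::fmt::Debug>() {
    assert_eq!(T::default(), T::default());
}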
42 changes: 29 additions & 13 deletions src/atomic.rs
@@ -8,6 +8,12 @@ use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
 use util::*;
 use {BitSetLike, DrainableBitSet};
 
+const SHIFT0: usize = usize::SHIFT0;
+const SHIFT1: usize = usize::SHIFT1;
+const SHIFT2: usize = usize::SHIFT2;
+const SHIFT3: usize = usize::SHIFT3;
+const BITS: usize = <usize as UnsignedInteger>::BITS as usize;
+
 /// This is similar to a [`BitSet`] but allows setting of value
 /// without unique ownership of the structure
 ///
@@ -46,7 +52,7 @@ impl AtomicBitSet {
     /// this will panic if the Index is out of range.
     #[inline]
     pub fn add_atomic(&self, id: Index) -> bool {
-        let (_, p1, p2) = offsets(id);
+        let (_, p1, p2) = offsets::<usize>(id);
 
         // While it is tempting to check of the bit was set and exit here if it
         // was, this can result in a data race. If this thread and another
@@ -65,14 +71,19 @@
     pub fn add(&mut self, id: Index) -> bool {
         use std::sync::atomic::Ordering::Relaxed;
 
-        let (_, p1, p2) = offsets(id);
+        let (_, p1, p2) = offsets::<usize>(id);
         if self.layer1[p1].add(id) {
             return true;
         }
 
-        self.layer2[p2].store(self.layer2[p2].load(Relaxed) | id.mask(SHIFT2), Relaxed);
-        self.layer3
-            .store(self.layer3.load(Relaxed) | id.mask(SHIFT3), Relaxed);
+        self.layer2[p2].store(
+            self.layer2[p2].load(Relaxed) | id.mask::<usize>(SHIFT2),
+            Relaxed,
+        );
+        self.layer3.store(
+            self.layer3.load(Relaxed) | id.mask::<usize>(SHIFT3),
+            Relaxed,
+        );
         false
     }
 
@@ -82,7 +93,7 @@
     #[inline]
     pub fn remove(&mut self, id: Index) -> bool {
         use std::sync::atomic::Ordering::Relaxed;
-        let (_, p1, p2) = offsets(id);
+        let (_, p1, p2) = offsets::<usize>(id);
 
         // if the bitmask was set we need to clear
         // its bit from layer0 to 3. the layers above only
@@ -98,13 +109,13 @@
             return true;
         }
 
-        let v = self.layer2[p2].load(Relaxed) & !id.mask(SHIFT2);
+        let v = self.layer2[p2].load(Relaxed) & !id.mask::<usize>(SHIFT2);
         self.layer2[p2].store(v, Relaxed);
         if v != 0 {
             return true;
         }
 
-        let v = self.layer3.load(Relaxed) & !id.mask(SHIFT3);
+        let v = self.layer3.load(Relaxed) & !id.mask::<usize>(SHIFT3);
         self.layer3.store(v, Relaxed);
         return true;
     }
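The `::<usize>` turbofish on `offsets`, `mask`, and `row` above implies the index helpers in src/util.rs were made generic over the word type; their new definitions are not in the loaded hunks. A plausible sketch, with signatures inferred from the call sites (everything here is an assumption, including the `Index` alias):

pub type Index = u32; // the crate's id type, as suggested by `prefix: [u32; ...]`

// Word offsets of `bit` in layers 1-3; matches `let (_, p1, p2) = offsets::<usize>(id)`.
pub fn offsets<T: UnsignedInteger>(bit: Index) -> (usize, usize, usize) {
    (
        bit as usize >> T::SHIFT1,
        bit as usize >> T::SHIFT2,
        bit as usize >> T::SHIFT3,
    )
}

pub trait Row: Sized + Copy {
    // Position of this id's bit within its word at the layer selected by `shift`.
    fn row<T: UnsignedInteger>(self, shift: usize) -> usize;

    // Single-bit mask for that position, now returning the generic word type.
    fn mask<T: UnsignedInteger>(self, shift: usize) -> T;
}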
@@ -151,6 +162,8 @@ impl AtomicBitSet {
 }
 
 impl BitSetLike for AtomicBitSet {
+    type Underlying = usize;
+
     #[inline]
     fn layer3(&self) -> usize {
         self.layer3.load(Ordering::Relaxed)
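The added `type Underlying = usize;` is the hook that lets `AtomicBitSet` participate in the generalized API: `BitSetLike` now names its storage word via an associated type, which the iterators below refer to as `T::Underlying`. An abridged sketch of the assumed trait shape (only the associated type is confirmed by this diff; the methods are inferred from the iterator hunks):

pub trait BitSetLike {
    type Underlying: UnsignedInteger;

    // Top-layer word; for AtomicBitSet, Underlying = usize as before.
    fn layer3(&self) -> Self::Underlying;

    // Word `idx` of the given layer, as used by BitIter::handle_level below.
    fn get_from_layer(&self, layer: usize, idx: usize) -> Self::Underlying;
}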
@@ -286,7 +299,7 @@ impl AtomicBlock {
     }
 
     fn add(&self, id: Index) -> bool {
-        let (i, m) = (id.row(SHIFT1), id.mask(SHIFT0));
+        let (i, m) = (id.row::<usize>(SHIFT1), id.mask::<usize>(SHIFT0));
         let old = self.atom.get_or_init()[i].fetch_or(m, Ordering::Relaxed);
         self.mask.fetch_or(id.mask(SHIFT1), Ordering::Relaxed);
         old & m != 0
@@ -295,20 +308,23 @@
     fn contains(&self, id: Index) -> bool {
         self.atom
             .get()
-            .map(|layer0| layer0[id.row(SHIFT1)].load(Ordering::Relaxed) & id.mask(SHIFT0) != 0)
+            .map(|layer0| {
+                layer0[id.row::<usize>(SHIFT1)].load(Ordering::Relaxed) & id.mask::<usize>(SHIFT0)
+                    != 0
+            })
             .unwrap_or(false)
     }
 
     fn remove(&mut self, id: Index) -> bool {
         if let Some(layer0) = self.atom.get_mut() {
-            let (i, m) = (id.row(SHIFT1), !id.mask(SHIFT0));
+            let (i, m) = (id.row::<usize>(SHIFT1), !id.mask::<usize>(SHIFT0));
             let v = layer0[i].get_mut();
-            let was_set = *v & id.mask(SHIFT0) == id.mask(SHIFT0);
+            let was_set = *v & id.mask::<usize>(SHIFT0) == id.mask(SHIFT0);
             *v = *v & m;
             if *v == 0 {
                 // no other bits are set
                 // so unset bit in the next level up
-                *self.mask.get_mut() &= !id.mask(SHIFT1);
+                *self.mask.get_mut() &= !id.mask::<usize>(SHIFT1);
             }
             was_set
         } else {
24 changes: 15 additions & 9 deletions src/iter/drain.rs
@@ -1,11 +1,11 @@
 use iter::BitIter;
 use util::*;
-use DrainableBitSet;
+use {BitSetLike, DrainableBitSet};
 
 /// A draining `Iterator` over a [`DrainableBitSet`] structure.
 ///
 /// [`DrainableBitSet`]: ../trait.DrainableBitSet.html
-pub struct DrainBitIter<'a, T: 'a> {
+pub struct DrainBitIter<'a, T: 'a + BitSetLike> {
     iter: BitIter<&'a mut T>,
 }

@@ -14,7 +14,7 @@ impl<'a, T: DrainableBitSet> DrainBitIter<'a, T> {
     /// but just [`.drain()`] on a bit set.
     ///
     /// [`.drain()`]: ../trait.DrainableBitSet.html#method.drain
-    pub fn new(set: &'a mut T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
+    pub fn new(set: &'a mut T, masks: [T::Underlying; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
         DrainBitIter {
             iter: BitIter::new(set, masks, prefix),
         }
@@ -36,10 +36,16 @@ where
     }
 }
 
-#[test]
-fn drain_all() {
-    use {BitSet, BitSetLike};
-    let mut bit_set: BitSet = (0..10000).filter(|i| i % 2 == 0).collect();
-    bit_set.drain().for_each(|_| {});
-    assert_eq!(0, bit_set.iter().count());
+#[cfg(test)]
+mod tests {
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
+    use {BitSetLike, DrainableBitSet, GenericBitSet, UnsignedInteger};
+
+    #[test_with(u32, u64, usize)]
+    fn drain_all<T: UnsignedInteger>() {
+        let mut bit_set: GenericBitSet<T> = (0..10000).filter(|i| i % 2 == 0).collect();
+        bit_set.drain().for_each(|_| {});
+        assert_eq!(0, bit_set.iter().count());
+    }
 }
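For readers unfamiliar with `test_with`: the attribute presumably expands the generic test into one monomorphized #[test] per listed type, roughly like this (the generated identifiers are illustrative; the real names are chosen by the macro):

#[test]
fn drain_all_u32() {
    drain_all::<u32>();
}

#[test]
fn drain_all_u64() {
    drain_all::<u64>();
}

#[test]
fn drain_all_usize() {
    drain_all::<usize>();
}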
49 changes: 26 additions & 23 deletions src/iter/mod.rs
@@ -1,5 +1,5 @@
 use util::*;
-use {BitSet, BitSetLike};
+use {BitSetLike, GenericBitSet};
 
 pub use self::drain::DrainBitIter;
 
@@ -14,18 +14,18 @@ mod parallel;
 ///
 /// [`BitSetLike`]: ../trait.BitSetLike.html
 #[derive(Debug, Clone)]
-pub struct BitIter<T> {
+pub struct BitIter<T: BitSetLike> {
     pub(crate) set: T,
-    pub(crate) masks: [usize; LAYERS],
+    pub(crate) masks: [T::Underlying; LAYERS],
     pub(crate) prefix: [u32; LAYERS - 1],
 }
 
-impl<T> BitIter<T> {
+impl<T: BitSetLike> BitIter<T> {
     /// Creates a new `BitIter`. You usually don't call this function
     /// but just [`.iter()`] on a bit set.
     ///
     /// [`.iter()`]: ../trait.BitSetLike.html#method.iter
-    pub fn new(set: T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
+    pub fn new(set: T, masks: [T::Underlying; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
         BitIter {
             set: set,
             masks: masks,
@@ -41,16 +41,16 @@ impl<T: BitSetLike> BitIter<T> {
     }
 }
 
-impl<'a> BitIter<&'a mut BitSet> {
+impl<'a, T: UnsignedInteger> BitIter<&'a mut GenericBitSet<T>> {
     /// Clears the rest of the bitset starting from the next inner layer.
     pub(crate) fn clear(&mut self) {
         use self::State::Continue;
         while let Some(level) = (1..LAYERS).find(|&level| self.handle_level(level) == Continue) {
             let lower = level - 1;
-            let idx = (self.prefix[lower] >> BITS) as usize;
-            *self.set.layer_mut(lower, idx) = 0;
+            let idx = (self.prefix[lower] >> T::BITS) as usize;
+            *self.set.layer_mut(lower, idx) = T::ZERO;
             if level == LAYERS - 1 {
-                self.set.layer3 &= !((2 << idx) - 1);
+                self.set.layer3 &= T::from_u32(!((2usize << idx) - 1) as u32);
             }
         }
     }
@@ -88,13 +88,13 @@ where
 impl<T: BitSetLike> BitIter<T> {
     pub(crate) fn handle_level(&mut self, level: usize) -> State {
         use self::State::*;
-        if self.masks[level] == 0 {
+        if self.masks[level] == T::Underlying::ZERO {
             Empty
         } else {
             // Take the first bit that isn't zero
             let first_bit = self.masks[level].trailing_zeros();
             // Remove it from the mask
-            self.masks[level] &= !(1 << first_bit);
+            self.masks[level] &= !(T::Underlying::ONE << T::Underlying::from_u32(first_bit));
             // Calculate the index of it
             let idx = self.prefix.get(level).cloned().unwrap_or(0) | first_bit;
             if level == 0 {
@@ -103,7 +103,7 @@ impl<T: BitSetLike> BitIter<T> {
             } else {
                 // Take the corresponding `usize` from the layer below
                 self.masks[level - 1] = self.set.get_from_layer(level - 1, idx as usize);
-                self.prefix[level - 1] = idx << BITS;
+                self.prefix[level - 1] = idx << T::Underlying::BITS;
                 Continue
             }
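A worked example of the prefix arithmetic above, under the BITS-is-log2-width reading (an assumption carried over from the trait sketch at the top of this page):

// Assuming T::Underlying = u64, so BITS = 6 and each word holds 64 bits.
// Suppose handle_level(1) pops first_bit = 3 in the layer-1 word whose
// prefix[1] is 128 (i.e. it covers ids 8192..12288):
//   idx       = 128 | 3                 = 131   // 131st layer-0 word
//   masks[0]  = get_from_layer(0, 131)          // fetch that word
//   prefix[0] = 131 << 6                = 8384  // first id covered by it
// Any bit b later popped from masks[0] at level 0 yields id 8384 | b.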
}
@@ -112,34 +112,37 @@ impl<T: BitSetLike> BitIter<T> {

 #[cfg(test)]
 mod tests {
-    use {BitSet, BitSetLike};
+    extern crate typed_test_gen;
+    use self::typed_test_gen::test_with;
+
+    use {BitSetLike, GenericBitSet, UnsignedInteger};
 
-    #[test]
-    fn iterator_clear_empties() {
+    #[test_with(u32, u64, usize)]
+    fn iterator_clear_empties<T: UnsignedInteger>() {
         use rand::prelude::*;
 
-        let mut set = BitSet::new();
+        let mut set = GenericBitSet::<T>::new();
         let mut rng = thread_rng();
         let limit = 1_048_576;
         for _ in 0..(limit / 10) {
             set.add(rng.gen_range(0, limit));
         }
         (&mut set).iter().clear();
-        assert_eq!(0, set.layer3);
+        assert_eq!(T::ZERO, set.layer3);
         for &i in &set.layer2 {
-            assert_eq!(0, i);
+            assert_eq!(T::ZERO, i);
         }
         for &i in &set.layer1 {
-            assert_eq!(0, i);
+            assert_eq!(T::ZERO, i);
        }
         for &i in &set.layer0 {
-            assert_eq!(0, i);
+            assert_eq!(T::ZERO, i);
         }
     }
 
-    #[test]
-    fn iterator_clone() {
-        let mut set = BitSet::new();
+    #[test_with(u32, u64, usize)]
+    fn iterator_clone<T: UnsignedInteger>() {
+        let mut set = GenericBitSet::<T>::new();
         set.add(1);
         set.add(3);
         let iter = set.iter().skip(1);
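Taken together, the visible changes deliver what the commit title promises: the concrete set type becomes `GenericBitSet<T>`, so callers can pin the word width instead of always getting `usize`. A usage sketch — `GenericBitSet`, `add`, and `iter` all appear in this diff, while the aliases are hypothetical and `contains` is assumed to carry over from the existing BitSet API:

// Hypothetical aliases: force a word width regardless of target pointer size.
type BitSet32 = GenericBitSet<u32>;
type BitSet64 = GenericBitSet<u64>;

fn demo() {
    let mut set = BitSet64::new();
    set.add(1_000);
    assert!(set.contains(1_000));
    assert_eq!(1, set.iter().count());
}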
(Diff truncated: the remaining four changed files were not loaded in this view.)
