Generalize integer type #66

Merged (1 commit) on Feb 6, 2024

1 change: 1 addition & 0 deletions Cargo.toml
@@ -15,6 +15,7 @@ optional = true

[dev-dependencies]
rand = "0.7"
typed_test_gen = "0.1"

[features]
default = ["parallel"]
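The new dev-dependency is what drives the typed tests added further down: `typed_test_gen::test_with` expands the annotated function into one `#[test]` per listed type. A minimal sketch of the pattern, assuming the crate is `hibitset` and that `GenericBitSet` and `UnsignedInteger` are exported from the crate root as the test imports below suggest (`add_then_contains` is a made-up name, not one of the tests in this PR):

```rust
extern crate hibitset;
extern crate typed_test_gen;

use hibitset::{BitSetLike, GenericBitSet, UnsignedInteger};
use typed_test_gen::test_with;

// Expands into three tests, one each for u32, u64 and usize
// (the exact generated names depend on the macro).
#[test_with(u32, u64, usize)]
fn add_then_contains<T: UnsignedInteger>() {
    let mut set = GenericBitSet::<T>::new();
    assert!(!set.add(123)); // add returns whether the bit was already set
    assert!(set.contains(123));
}
```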
62 changes: 39 additions & 23 deletions src/atomic.rs
@@ -8,6 +8,12 @@ use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use util::*;
use {BitSetLike, DrainableBitSet};

const SHIFT0: usize = usize::SHIFT0;
const SHIFT1: usize = usize::SHIFT1;
const SHIFT2: usize = usize::SHIFT2;
const SHIFT3: usize = usize::SHIFT3;
const LOG_BITS: usize = <usize as UnsignedInteger>::LOG_BITS as usize;

/// This is similar to a [`BitSet`] but allows setting of value
/// without unique ownership of the structure
///
@@ -46,7 +52,7 @@ impl AtomicBitSet {
/// this will panic if the Index is out of range.
#[inline]
pub fn add_atomic(&self, id: Index) -> bool {
let (_, p1, p2) = offsets(id);
let (_, p1, p2) = offsets::<usize>(id);

// While it is tempting to check if the bit was set and exit here if it
// was, this can result in a data race. If this thread and another
@@ -65,14 +71,19 @@ impl AtomicBitSet {
pub fn add(&mut self, id: Index) -> bool {
use std::sync::atomic::Ordering::Relaxed;

let (_, p1, p2) = offsets(id);
let (_, p1, p2) = offsets::<usize>(id);
if self.layer1[p1].add(id) {
return true;
}

self.layer2[p2].store(self.layer2[p2].load(Relaxed) | id.mask(SHIFT2), Relaxed);
self.layer3
.store(self.layer3.load(Relaxed) | id.mask(SHIFT3), Relaxed);
self.layer2[p2].store(
self.layer2[p2].load(Relaxed) | id.mask::<usize>(SHIFT2),
Relaxed,
);
self.layer3.store(
self.layer3.load(Relaxed) | id.mask::<usize>(SHIFT3),
Relaxed,
);
false
}

@@ -82,7 +93,7 @@ impl AtomicBitSet {
#[inline]
pub fn remove(&mut self, id: Index) -> bool {
use std::sync::atomic::Ordering::Relaxed;
let (_, p1, p2) = offsets(id);
let (_, p1, p2) = offsets::<usize>(id);

// if the bitmask was set we need to clear
// its bit from layer0 to 3. the layers above only
@@ -98,13 +109,13 @@ impl AtomicBitSet {
return true;
}

let v = self.layer2[p2].load(Relaxed) & !id.mask(SHIFT2);
let v = self.layer2[p2].load(Relaxed) & !id.mask::<usize>(SHIFT2);
self.layer2[p2].store(v, Relaxed);
if v != 0 {
return true;
}

let v = self.layer3.load(Relaxed) & !id.mask(SHIFT3);
let v = self.layer3.load(Relaxed) & !id.mask::<usize>(SHIFT3);
self.layer3.store(v, Relaxed);
return true;
}
@@ -141,7 +152,7 @@ impl AtomicBitSet {
if m3 != 0 {
let bit = m3.trailing_zeros() as usize;
m3 &= !(1 << bit);
offset = bit << BITS;
offset = bit << LOG_BITS;
m2 = self.layer2[bit].swap(0, Ordering::Relaxed);
continue;
}
@@ -151,6 +162,8 @@ }
}

impl BitSetLike for AtomicBitSet {
type Underlying = usize;

#[inline]
fn layer3(&self) -> usize {
self.layer3.load(Ordering::Relaxed)
@@ -165,7 +178,7 @@ impl BitSetLike for AtomicBitSet {
}
#[inline]
fn layer0(&self, i: usize) -> usize {
let (o1, o0) = (i >> BITS, i & ((1 << BITS) - 1));
let (o1, o0) = (i >> LOG_BITS, i & ((1 << LOG_BITS) - 1));
self.layer1[o1]
.atom
.get()
@@ -191,19 +204,19 @@ impl Default for AtomicBitSet {
layer3: Default::default(),
layer2: repeat(0)
.map(|_| AtomicUsize::new(0))
.take(1 << BITS)
.take(1 << LOG_BITS)
.collect(),
layer1: repeat(0)
.map(|_| AtomicBlock::new())
.take(1 << (2 * BITS))
.take(1 << (2 * LOG_BITS))
.collect(),
}
}
}

struct OnceAtom {
inner: AtomicPtr<[AtomicUsize; 1 << BITS]>,
marker: PhantomData<Option<Box<[AtomicUsize; 1 << BITS]>>>,
inner: AtomicPtr<[AtomicUsize; 1 << LOG_BITS]>,
marker: PhantomData<Option<Box<[AtomicUsize; 1 << LOG_BITS]>>>,
}

impl Drop for OnceAtom {
@@ -225,11 +238,11 @@ impl OnceAtom {
}
}

fn get_or_init(&self) -> &[AtomicUsize; 1 << BITS] {
fn get_or_init(&self) -> &[AtomicUsize; 1 << LOG_BITS] {
let current_ptr = self.inner.load(Ordering::Acquire);
let ptr = if current_ptr.is_null() {
const ZERO: AtomicUsize = AtomicUsize::new(0);
let new_ptr = Box::into_raw(Box::new([ZERO; 1 << BITS]));
let new_ptr = Box::into_raw(Box::new([ZERO; 1 << LOG_BITS]));
if let Err(existing_ptr) = self.inner.compare_exchange(
ptr::null_mut(),
new_ptr,
@@ -256,15 +269,15 @@ impl OnceAtom {
unsafe { &*ptr }
}

fn get(&self) -> Option<&[AtomicUsize; 1 << BITS]> {
fn get(&self) -> Option<&[AtomicUsize; 1 << LOG_BITS]> {
let ptr = self.inner.load(Ordering::Acquire);
// SAFETY: If it is not null, we created this pointer from
// `Box::into_raw` and only use it to create immutable references
// (unless we have exclusive access to self)
unsafe { ptr.as_ref() }
}

fn get_mut(&mut self) -> Option<&mut [AtomicUsize; 1 << BITS]> {
fn get_mut(&mut self) -> Option<&mut [AtomicUsize; 1 << LOG_BITS]> {
let ptr = self.inner.get_mut();
// SAFETY: If this is not null, we created this pointer from
// `Box::into_raw` and we have an exclusive borrow of self.
@@ -286,7 +299,7 @@ impl AtomicBlock {
}

fn add(&self, id: Index) -> bool {
let (i, m) = (id.row(SHIFT1), id.mask(SHIFT0));
let (i, m) = (id.row::<usize>(SHIFT1), id.mask::<usize>(SHIFT0));
let old = self.atom.get_or_init()[i].fetch_or(m, Ordering::Relaxed);
self.mask.fetch_or(id.mask(SHIFT1), Ordering::Relaxed);
old & m != 0
@@ -295,20 +308,23 @@
fn contains(&self, id: Index) -> bool {
self.atom
.get()
.map(|layer0| layer0[id.row(SHIFT1)].load(Ordering::Relaxed) & id.mask(SHIFT0) != 0)
.map(|layer0| {
layer0[id.row::<usize>(SHIFT1)].load(Ordering::Relaxed) & id.mask::<usize>(SHIFT0)
!= 0
})
.unwrap_or(false)
}

fn remove(&mut self, id: Index) -> bool {
if let Some(layer0) = self.atom.get_mut() {
let (i, m) = (id.row(SHIFT1), !id.mask(SHIFT0));
let (i, m) = (id.row::<usize>(SHIFT1), !id.mask::<usize>(SHIFT0));
let v = layer0[i].get_mut();
let was_set = *v & id.mask(SHIFT0) == id.mask(SHIFT0);
let was_set = *v & id.mask::<usize>(SHIFT0) == id.mask(SHIFT0);
*v = *v & m;
if *v == 0 {
// no other bits are set
// so unset bit in the next level up
*self.mask.get_mut() &= !id.mask(SHIFT1);
*self.mask.get_mut() &= !id.mask::<usize>(SHIFT1);
}
was_set
} else {
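Throughout this file the `offsets`, `row` and `mask` helpers are now called with an explicit `::<usize>` parameter, and `SHIFT0..SHIFT3`/`LOG_BITS` are derived from the `UnsignedInteger` impl for `usize` rather than hard-coded. As a reminder of what those names stand for, here is a rough sketch of the hierarchical index arithmetic (stand-in helpers, not the crate's actual definitions; on a 64-bit target `LOG_BITS` is 6, so each layer covers 64 times the range of the layer below):

```rust
// Stand-in helpers illustrating the index math used by the layered bit set.
const LOG_BITS: usize = usize::BITS.trailing_zeros() as usize; // 6 on 64-bit targets
const SHIFT0: usize = 0;
const SHIFT1: usize = SHIFT0 + LOG_BITS; // 6
const SHIFT2: usize = SHIFT1 + LOG_BITS; // 12
const SHIFT3: usize = SHIFT2 + LOG_BITS; // 18

// Bit position of `id` within its word at the layer selected by `shift`.
fn row(id: u32, shift: usize) -> usize {
    (id as usize >> shift) & ((1 << LOG_BITS) - 1)
}

// Single-bit mask for `id` within that word.
fn mask(id: u32, shift: usize) -> usize {
    1 << row(id, shift)
}

// Word indices of `id` in layers 0, 1 and 2.
fn offsets(id: u32) -> (usize, usize, usize) {
    (id as usize >> SHIFT1, id as usize >> SHIFT2, id as usize >> SHIFT3)
}

fn main() {
    // Index 200 sits in layer-0 word 3 (200 >> 6), at bit 8 (200 & 63).
    assert_eq!(offsets(200).0, 3);
    assert_eq!(row(200, SHIFT0), 8);
    assert_eq!(mask(200, SHIFT0), 1 << 8);
}
```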
24 changes: 15 additions & 9 deletions src/iter/drain.rs
@@ -1,11 +1,11 @@
use iter::BitIter;
use util::*;
use DrainableBitSet;
use {BitSetLike, DrainableBitSet};

/// A draining `Iterator` over a [`DrainableBitSet`] structure.
///
/// [`DrainableBitSet`]: ../trait.DrainableBitSet.html
pub struct DrainBitIter<'a, T: 'a> {
pub struct DrainBitIter<'a, T: 'a + BitSetLike> {
iter: BitIter<&'a mut T>,
}

@@ -14,7 +14,7 @@ impl<'a, T: DrainableBitSet> DrainBitIter<'a, T> {
/// but just [`.drain()`] on a bit set.
///
/// [`.drain()`]: ../trait.DrainableBitSet.html#method.drain
pub fn new(set: &'a mut T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
pub fn new(set: &'a mut T, masks: [T::Underlying; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
DrainBitIter {
iter: BitIter::new(set, masks, prefix),
}
@@ -36,10 +36,16 @@ where
}
}

#[test]
fn drain_all() {
use {BitSet, BitSetLike};
let mut bit_set: BitSet = (0..10000).filter(|i| i % 2 == 0).collect();
bit_set.drain().for_each(|_| {});
assert_eq!(0, bit_set.iter().count());
#[cfg(test)]
mod tests {
extern crate typed_test_gen;
use self::typed_test_gen::test_with;
use {BitSetLike, DrainableBitSet, GenericBitSet, UnsignedInteger};

#[test_with(u32, u64, usize)]
fn drain_all<T: UnsignedInteger>() {
let mut bit_set: GenericBitSet<T> = (0..10000).filter(|i| i % 2 == 0).collect();
bit_set.drain().for_each(|_| {});
assert_eq!(0, bit_set.iter().count());
}
}
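With `BitIter` now parameterized over `T::Underlying`, draining behaves identically whatever word type backs the set. A small usage sketch (again assuming the `hibitset` crate-root re-exports suggested by the test imports above):

```rust
extern crate hibitset;

use hibitset::{BitSetLike, DrainableBitSet, GenericBitSet};

fn main() {
    // A set backed by u64 words; u32 or usize would work the same way.
    let mut set: GenericBitSet<u64> = (0..100u32).filter(|i| i % 3 == 0).collect();

    // Draining yields each stored index once and clears it as it goes.
    let drained: Vec<u32> = set.drain().collect();
    assert_eq!(drained.len(), 34); // 0, 3, 6, ..., 99

    // Afterwards the set is empty, just like in the drain_all test above.
    assert_eq!(set.iter().count(), 0);
}
```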
49 changes: 26 additions & 23 deletions src/iter/mod.rs
@@ -1,5 +1,5 @@
use util::*;
use {BitSet, BitSetLike};
use {BitSetLike, GenericBitSet};

pub use self::drain::DrainBitIter;

@@ -14,18 +14,18 @@ mod parallel;
///
/// [`BitSetLike`]: ../trait.BitSetLike.html
#[derive(Debug, Clone)]
pub struct BitIter<T> {
pub struct BitIter<T: BitSetLike> {
pub(crate) set: T,
pub(crate) masks: [usize; LAYERS],
pub(crate) masks: [T::Underlying; LAYERS],
pub(crate) prefix: [u32; LAYERS - 1],
}

impl<T> BitIter<T> {
impl<T: BitSetLike> BitIter<T> {
/// Creates a new `BitIter`. You usually don't call this function
/// but just [`.iter()`] on a bit set.
///
/// [`.iter()`]: ../trait.BitSetLike.html#method.iter
pub fn new(set: T, masks: [usize; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
pub fn new(set: T, masks: [T::Underlying; LAYERS], prefix: [u32; LAYERS - 1]) -> Self {
BitIter {
set: set,
masks: masks,
@@ -41,16 +41,16 @@ impl<T: BitSetLike> BitIter<T> {
}
}

impl<'a> BitIter<&'a mut BitSet> {
impl<'a, T: UnsignedInteger> BitIter<&'a mut GenericBitSet<T>> {
/// Clears the rest of the bitset starting from the next inner layer.
pub(crate) fn clear(&mut self) {
use self::State::Continue;
while let Some(level) = (1..LAYERS).find(|&level| self.handle_level(level) == Continue) {
let lower = level - 1;
let idx = (self.prefix[lower] >> BITS) as usize;
*self.set.layer_mut(lower, idx) = 0;
let idx = (self.prefix[lower] >> T::LOG_BITS) as usize;
*self.set.layer_mut(lower, idx) = T::ZERO;
if level == LAYERS - 1 {
self.set.layer3 &= !((2 << idx) - 1);
self.set.layer3 &= T::from_u64(!((2usize << idx) - 1) as u64);
}
}
}
@@ -88,13 +88,13 @@ where
impl<T: BitSetLike> BitIter<T> {
pub(crate) fn handle_level(&mut self, level: usize) -> State {
use self::State::*;
if self.masks[level] == 0 {
if self.masks[level] == T::Underlying::ZERO {
Empty
} else {
// Take the first bit that isn't zero
let first_bit = self.masks[level].trailing_zeros();
// Remove it from the mask
self.masks[level] &= !(1 << first_bit);
self.masks[level] &= !(T::Underlying::ONE << T::Underlying::from_u32(first_bit));
// Calculate the index of it
let idx = self.prefix.get(level).cloned().unwrap_or(0) | first_bit;
if level == 0 {
@@ -103,7 +103,7 @@ impl<T: BitSetLike> BitIter<T> {
} else {
// Take the corresponding `usize` from the layer below
self.masks[level - 1] = self.set.get_from_layer(level - 1, idx as usize);
self.prefix[level - 1] = idx << BITS;
self.prefix[level - 1] = idx << T::Underlying::LOG_BITS;
Continue
}
}
@@ -112,34 +112,37 @@ impl<T: BitSetLike> BitIter<T> {

#[cfg(test)]
mod tests {
use {BitSet, BitSetLike};
extern crate typed_test_gen;
use self::typed_test_gen::test_with;

use {BitSetLike, GenericBitSet, UnsignedInteger};

#[test]
fn iterator_clear_empties() {
#[test_with(u32, u64, usize)]
fn iterator_clear_empties<T: UnsignedInteger>() {
use rand::prelude::*;

let mut set = BitSet::new();
let mut set = GenericBitSet::<T>::new();
let mut rng = thread_rng();
let limit = 1_048_576;
for _ in 0..(limit / 10) {
set.add(rng.gen_range(0, limit));
}
(&mut set).iter().clear();
assert_eq!(0, set.layer3);
assert_eq!(T::ZERO, set.layer3);
for &i in &set.layer2 {
assert_eq!(0, i);
assert_eq!(T::ZERO, i);
}
for &i in &set.layer1 {
assert_eq!(0, i);
assert_eq!(T::ZERO, i);
}
for &i in &set.layer0 {
assert_eq!(0, i);
assert_eq!(T::ZERO, i);
}
}

#[test]
fn iterator_clone() {
let mut set = BitSet::new();
#[test_with(u32, u64, usize)]
fn iterator_clone<T: UnsignedInteger>() {
let mut set = GenericBitSet::<T>::new();
set.add(1);
set.add(3);
let iter = set.iter().skip(1);
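The `UnsignedInteger` trait itself (presumably in src/util.rs, pulled in via `use util::*;`) is not shown in this view. Pieced together from how it is used in the hunks above (`T::ZERO`, `T::Underlying::ONE`, `LOG_BITS`, `from_u32`, `from_u64`, `trailing_zeros`, plus the shift and bit operators), it might look roughly like the sketch below; the bounds and constant types in the merged code may well differ:

```rust
// Approximation only, reconstructed from how the trait is used in this diff.
use std::fmt::Debug;
use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not, Shl, Shr};

pub trait UnsignedInteger:
    Copy
    + Debug
    + PartialEq
    + BitAnd<Output = Self>
    + BitOr<Output = Self>
    + BitAndAssign
    + BitOrAssign
    + Not<Output = Self>
    + Shl<Self, Output = Self>
    + Shr<Self, Output = Self>
{
    const ZERO: Self;
    const ONE: Self;
    /// log2 of the bit width: 5 for u32, 6 for u64 (and for usize on 64-bit targets).
    const LOG_BITS: Self;
    fn from_u32(v: u32) -> Self;
    fn from_u64(v: u64) -> Self;
    fn trailing_zeros(self) -> u32;
}

// One of the three impls the tests exercise; u32 and usize would mirror it,
// with LOG_BITS 5 and a platform-dependent value respectively.
impl UnsignedInteger for u64 {
    const ZERO: Self = 0;
    const ONE: Self = 1;
    const LOG_BITS: Self = 6;
    fn from_u32(v: u32) -> Self { v as Self }
    fn from_u64(v: u64) -> Self { v }
    fn trailing_zeros(self) -> u32 { u64::trailing_zeros(self) }
}
```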