hibitset/
lib.rs

1//! # hibitset
2//!
3//! Provides hierarchical bit sets,
4//! which allow very fast iteration
5//! on sparse data structures.
6//!
7//! ## What it does
8//!
9//! A `BitSet` may be considered analogous to a `HashSet<u32>`. It
10//! tracks whether or not certain indices exist within it. Its
11//! implementation is very different, however.
12//!
13//! At its root, a `BitSet` relies on an array of bits, which express
14//! whether or not indices exist. This provides the functionality to
15//! `add( )` and `remove( )` indices.
16//!
17//! This array is referred to as Layer 0. Above it, there is another
//! layer: Layer 1. Layer 1 acts as a 'summary' of Layer 0. It contains
//! one bit for each `usize` of Layer 0. If any bit in that `usize`
//! of Layer 0 is set, the corresponding bit in Layer 1 will be set.
21//!
22//! There are, in total, four layers. Layers 1 through 3 are each a
23//! summary of the layer immediately below them.
24//!
25//! ```no_compile
26//! Example, with an imaginary 4-bit usize:
27//!
28//! Layer 3: 1------------------------------------------------ ...
29//! Layer 2: 1------------------ 1------------------ 0-------- ...
30//! Layer 1: 1--- 0--- 0--- 0--- 1--- 0--- 1--- 0--- 0--- 0--- ...
31//! Layer 0: 0010 0000 0000 0000 0011 0000 1111 0000 0000 0000 ...
32//! ```
33//!
34//! This method makes operations that operate over the whole `BitSet`,
35//! such as unions, intersections, and iteration, very fast (because if
36//! any bit in any summary layer is zero, an entire range of bits
37//! below it can be skipped.)
38//!
39//! However, there is a maximum on index size. The top layer (Layer 3)
40//! of the BitSet is a single `usize` long. This makes the maximum index
41//! `usize**4` (`1,048,576` for a 32-bit `usize`, `16,777,216` for a
42//! 64-bit `usize`). Attempting to add indices larger than that will cause
43//! the `BitSet` to panic.
44//!
45
46#![deny(missing_docs)]
47
48#[cfg(test)]
49extern crate rand;
50#[cfg(feature = "parallel")]
51extern crate rayon;
52
53mod atomic;
54mod iter;
55mod ops;
56mod util;
57
58pub use atomic::AtomicBitSet;
59pub use iter::{BitIter, DrainBitIter};
60#[cfg(feature = "parallel")]
61pub use iter::{BitParIter, BitProducer};
62pub use ops::{BitSetAll, BitSetAnd, BitSetNot, BitSetOr, BitSetXor};
63
64use util::*;
65
/// A `BitSet` is a simple set designed to track which indices are placed
/// into it.
///
/// Note, a `BitSet` is limited by design to only `usize**4` indices.
/// Adding beyond this limit will cause the `BitSet` to panic.
#[derive(Clone, Debug, Default)]
pub struct BitSet {
    // Top layer: a single word summarizing `layer2`.
    layer3: usize,
    // One bit per word of `layer1`.
    layer2: Vec<usize>,
    // One bit per word of `layer0`.
    layer1: Vec<usize>,
    // Bottom layer: one bit per `Index` in the set.
    layer0: Vec<usize>,
}
78
79impl BitSet {
80    /// Creates an empty `BitSet`.
81    pub fn new() -> BitSet {
82        Default::default()
83    }
84
85    #[inline]
86    fn valid_range(max: Index) {
87        if (MAX_EID as u32) < max {
88            panic!("Expected index to be less then {}, found {}", MAX_EID, max);
89        }
90    }
91
92    /// Creates an empty `BitSet`, preallocated for up to `max` indices.
93    pub fn with_capacity(max: Index) -> BitSet {
94        Self::valid_range(max);
95        let mut value = BitSet::new();
96        value.extend(max);
97        value
98    }
99
100    #[inline(never)]
101    fn extend(&mut self, id: Index) {
102        Self::valid_range(id);
103        let (p0, p1, p2) = offsets(id);
104
105        Self::fill_up(&mut self.layer2, p2);
106        Self::fill_up(&mut self.layer1, p1);
107        Self::fill_up(&mut self.layer0, p0);
108    }
109
110    fn fill_up(vec: &mut Vec<usize>, upper_index: usize) {
111        if vec.len() <= upper_index {
112            vec.resize(upper_index + 1, 0);
113        }
114    }
115
116    /// This is used to set the levels in the hierarchy
117    /// when the lowest layer was set from 0.
118    #[inline(never)]
119    fn add_slow(&mut self, id: Index) {
120        let (_, p1, p2) = offsets(id);
121        self.layer1[p1] |= id.mask(SHIFT1);
122        self.layer2[p2] |= id.mask(SHIFT2);
123        self.layer3 |= id.mask(SHIFT3);
124    }
125
126    /// Adds `id` to the `BitSet`. Returns `true` if the value was
127    /// already in the set.
128    #[inline]
129    pub fn add(&mut self, id: Index) -> bool {
130        let (p0, mask) = (id.offset(SHIFT1), id.mask(SHIFT0));
131
132        if p0 >= self.layer0.len() {
133            self.extend(id);
134        }
135
136        if self.layer0[p0] & mask != 0 {
137            return true;
138        }
139
140        // we need to set the bit on every layer to indicate
141        // that the value can be found here.
142        let old = self.layer0[p0];
143        self.layer0[p0] |= mask;
144        if old == 0 {
145            self.add_slow(id);
146        }
147        false
148    }
149
150    fn layer_mut(&mut self, level: usize, idx: usize) -> &mut usize {
151        match level {
152            0 => {
153                Self::fill_up(&mut self.layer0, idx);
154                &mut self.layer0[idx]
155            }
156            1 => {
157                Self::fill_up(&mut self.layer1, idx);
158                &mut self.layer1[idx]
159            }
160            2 => {
161                Self::fill_up(&mut self.layer2, idx);
162                &mut self.layer2[idx]
163            }
164            3 => &mut self.layer3,
165            _ => panic!("Invalid layer: {}", level),
166        }
167    }
168
169    /// Removes `id` from the set, returns `true` if the value
170    /// was removed, and `false` if the value was not set
171    /// to begin with.
172    #[inline]
173    pub fn remove(&mut self, id: Index) -> bool {
174        let (p0, p1, p2) = offsets(id);
175
176        if p0 >= self.layer0.len() {
177            return false;
178        }
179
180        if self.layer0[p0] & id.mask(SHIFT0) == 0 {
181            return false;
182        }
183
184        // if the bitmask was set we need to clear
185        // its bit from layer0 to 3. the layers abover only
186        // should be cleared if the bit cleared was the last bit
187        // in its set
188        self.layer0[p0] &= !id.mask(SHIFT0);
189        if self.layer0[p0] != 0 {
190            return true;
191        }
192
193        self.layer1[p1] &= !id.mask(SHIFT1);
194        if self.layer1[p1] != 0 {
195            return true;
196        }
197
198        self.layer2[p2] &= !id.mask(SHIFT2);
199        if self.layer2[p2] != 0 {
200            return true;
201        }
202
203        self.layer3 &= !id.mask(SHIFT3);
204        return true;
205    }
206
207    /// Returns `true` if `id` is in the set.
208    #[inline]
209    pub fn contains(&self, id: Index) -> bool {
210        let p0 = id.offset(SHIFT1);
211        p0 < self.layer0.len() && (self.layer0[p0] & id.mask(SHIFT0)) != 0
212    }
213
214    /// Returns `true` if all ids in `other` are contained in this set
215    #[inline]
216    pub fn contains_set(&self, other: &BitSet) -> bool {
217        for id in other.iter() {
218            if !self.contains(id) {
219                return false;
220            }
221        }
222        true
223    }
224
225    /// Completely wipes out the bit set.
226    pub fn clear(&mut self) {
227        self.layer0.clear();
228        self.layer1.clear();
229        self.layer2.clear();
230        self.layer3 = 0;
231    }
232
233    /// How many bits are in a `usize`.
234    ///
235    /// This value can be trivially determined. It is provided here as a constant for clarity.
236    ///
237    /// # Example
238    ///
239    /// ```
240    /// use hibitset::BitSet;
241    /// assert_eq!(BitSet::BITS_PER_USIZE, std::mem::size_of::<usize>()*8);
242    /// ```
243    #[cfg(target_pointer_width = "32")]
244    pub const BITS_PER_USIZE: usize = 32;
245
246    /// How many bits are in a `usize`.
247    ///
248    /// This value can be trivially determined. It is provided here as a constant for clarity.
249    ///
250    /// # Example
251    ///
252    /// ```
253    /// use hibitset::BitSet;
254    /// assert_eq!(BitSet::BITS_PER_USIZE, std::mem::size_of::<usize>()*8);
255    /// ```
256    #[cfg(target_pointer_width = "64")]
257    pub const BITS_PER_USIZE: usize = 64;
258
259    /// Returns the bottom layer of the bitset as a slice. Each bit in this slice refers to a single
260    /// `Index`.
261    ///
262    /// The slice's length will be at least the length needed to reflect all the `1`s in the bitset,
263    /// but is not otherwise guaranteed. Consider it to be an implementation detail.
264    ///
265    /// # Example
266    ///
267    /// ```
268    /// use hibitset::BitSet;
269    ///
270    /// let index: u32 = 12345;
271    ///
272    /// let mut bitset = BitSet::new();
273    /// bitset.add(index);
274    ///
275    /// // layer 0 is 1:1 with Indexes, so we expect that bit in the slice to be set
276    /// let slice = bitset.layer0_as_slice();
277    /// let bit_index = index as usize;
278    ///
279    /// // map that bit index to a usize in the slice and a bit within that usize
280    /// let slice_index = bit_index / BitSet::BITS_PER_USIZE;
281    /// let bit_at_index = bit_index % BitSet::BITS_PER_USIZE;
282    ///
283    /// assert_eq!(slice[slice_index], 1 << bit_at_index);
284    /// ```
285    pub fn layer0_as_slice(&self) -> &[usize] {
286        self.layer0.as_slice()
287    }
288
289    /// How many `Index`es are described by as single layer 1 bit, intended for use with
290    /// `BitSet::layer1_as_slice()`.
291    ///
292    /// `BitSet`s are defined in terms of `usize`s summarizing `usize`s, so this value can be
293    /// trivially determined. It is provided here as a constant for clarity.
294    ///
295    /// # Example
296    ///
297    /// ```
298    /// use hibitset::BitSet;
299    /// assert_eq!(BitSet::LAYER1_GRANULARITY, BitSet::BITS_PER_USIZE);
300    /// ```
301    pub const LAYER1_GRANULARITY: usize = Self::BITS_PER_USIZE;
302
303    /// Returns the second layer of the bitset as a slice. Each bit in this slice summarizes a
304    /// corresponding `usize` from `layer0`. (If `usize` is 64 bits, bit 0 will be set if any
305    /// `Index`es 0-63 are set, bit 1 will be set if any `Index`es 64-127 are set, etc.)
306    /// `BitSet::LAYER1_GRANULARITY` reflects how many indexes are summarized per layer 1 bit.
307    ///
308    /// The slice's length is not guaranteed, except that it will be at least the length needed to
309    /// reflect all the `1`s in the bitset.
310    ///
311    /// # Example
312    ///
313    /// ```
314    /// use hibitset::BitSet;
315    ///
316    /// let index: u32 = 12345;
317    ///
318    /// let mut bitset = BitSet::new();
319    /// bitset.add(index);
320    ///
321    /// // layer 1 summarizes multiple indexes per bit, so divide appropriately
322    /// let slice = bitset.layer1_as_slice();
323    /// let bit_index = index as usize / BitSet::LAYER1_GRANULARITY;
324    ///
325    /// // map that bit index to a usize in the slice and a bit within that usize
326    /// let slice_index = bit_index / BitSet::BITS_PER_USIZE;
327    /// let bit_at_index = bit_index % BitSet::BITS_PER_USIZE;
328    ///
329    /// assert_eq!(slice[slice_index], 1 << bit_at_index);
330    /// ```
331    pub fn layer1_as_slice(&self) -> &[usize] {
332        self.layer1.as_slice()
333    }
334
335    /// How many `Index`es are described by as single layer 2 bit, intended for use with
336    /// `BitSet::layer2_as_slice()`.
337    ///
338    /// `BitSet`s are defined in terms of `usize`s summarizing `usize`s, so this value can be
339    /// trivially determined. It is provided here as a constant for clarity.
340    ///
341    /// # Example
342    ///
343    /// ```
344    /// use hibitset::BitSet;
345    /// assert_eq!(BitSet::LAYER2_GRANULARITY, BitSet::LAYER1_GRANULARITY * BitSet::BITS_PER_USIZE);
346    /// ```
347    pub const LAYER2_GRANULARITY: usize = Self::LAYER1_GRANULARITY * Self::BITS_PER_USIZE;
348
349    /// Returns the third layer of the bitset as a slice. Each bit in this slice summarizes a
350    /// corresponding `usize` from `layer1`. If `usize` is 64 bits, bit 0 will be set if any
351    /// `Index`es 0-4095 are set, bit 1 will be set if any `Index`es 4096-8191 are set, etc.
352    ///
353    /// The slice's length is not guaranteed, except that it will be at least the length needed to
354    /// reflect all the `1`s in the bitset.
355    ///
356    /// # Example
357    ///
358    /// ```
359    /// use hibitset::BitSet;
360    ///
361    /// let index: u32 = 12345;
362    ///
363    /// let mut bitset = BitSet::new();
364    /// bitset.add(index);
365    ///
366    /// // layer 2 summarizes multiple indexes per bit, so divide appropriately
367    /// let slice = bitset.layer2_as_slice();
368    /// let bit_index = index as usize / BitSet::LAYER2_GRANULARITY;
369    ///
370    /// // map that bit index to a usize in the slice and a bit within that usize
371    /// let slice_index = bit_index / BitSet::BITS_PER_USIZE;
372    /// let bit_at_index = bit_index % BitSet::BITS_PER_USIZE;
373    ///
374    /// assert_eq!(slice[slice_index], 1 << bit_at_index);
375    /// ```
376    pub fn layer2_as_slice(&self) -> &[usize] {
377        self.layer2.as_slice()
378    }
379}
380
/// A generic interface for [`BitSetLike`]-like types.
///
/// Every `BitSetLike` is hierarchical, meaning that there
/// are multiple levels that branch out in a tree like structure.
///
/// * Layer 0: each bit represents one `Index` of the set.
/// * Layer 1: each bit represents one `usize` of Layer 0, and will be
///   set only if the word below it is not zero.
/// * Layer 2: has the same arrangement over Layer 1, and Layer 3 over Layer 2.
///
/// This arrangement allows for rapid jumps across the key-space.
///
/// [`BitSetLike`]: ../trait.BitSetLike.html
pub trait BitSetLike {
    /// Gets the `usize` corresponding to layer and index.
    ///
    /// The `layer` should be in the range [0, 3]
    ///
    /// # Panics
    ///
    /// Panics if `layer` is greater than 3.
    fn get_from_layer(&self, layer: usize, idx: usize) -> usize {
        match layer {
            0 => self.layer0(idx),
            1 => self.layer1(idx),
            2 => self.layer2(idx),
            3 => self.layer3(),
            _ => panic!("Invalid layer: {}", layer),
        }
    }

    /// Returns true if this `BitSetLike` contains nothing, and false otherwise.
    fn is_empty(&self) -> bool {
        // The top layer summarizes everything below it, so a zero
        // layer3 word means no bit is set anywhere.
        self.layer3() == 0
    }

    /// Return a `usize` where each bit represents if any word in layer2
    /// has been set.
    fn layer3(&self) -> usize;

    /// Return the `usize` from the array of usizes that indicates if any
    /// bit has been set in layer1
    fn layer2(&self, i: usize) -> usize;

    /// Return the `usize` from the array of usizes that indicates if any
    /// bit has been set in layer0
    fn layer1(&self, i: usize) -> usize;

    /// Return a `usize` that maps to the direct 1:1 association with
    /// each index of the set
    fn layer0(&self, i: usize) -> usize;

    /// Allows checking if set bit is contained in the bit set.
    fn contains(&self, i: Index) -> bool;

    /// Create an iterator that will scan over the keyspace
    fn iter(self) -> BitIter<Self>
    where
        Self: Sized,
    {
        let layer3 = self.layer3();

        // Seed the iterator with the top-layer word; the lower masks and
        // prefixes start empty and are filled in as the iterator descends.
        BitIter::new(self, [0, 0, 0, layer3], [0; LAYERS - 1])
    }

    /// Create a parallel iterator that will scan over the keyspace
    #[cfg(feature = "parallel")]
    fn par_iter(self) -> BitParIter<Self>
    where
        Self: Sized,
    {
        BitParIter::new(self)
    }
}
451
/// A extension to the [`BitSetLike`] trait which allows draining it.
pub trait DrainableBitSet: BitSetLike {
    /// Removes bit from the bit set.
    ///
    /// Returns `true` if removal happened and `false` otherwise.
    fn remove(&mut self, i: Index) -> bool;

    /// Create a draining iterator that will scan over the keyspace and clears it while doing so.
    fn drain<'a>(&'a mut self) -> DrainBitIter<'a, Self>
    where
        Self: Sized,
    {
        let layer3 = self.layer3();

        // Same seeding as `BitSetLike::iter`, but the iterator also
        // removes each yielded index from the underlying set.
        DrainBitIter::new(self, [0, 0, 0, layer3], [0; LAYERS - 1])
    }
}
469
// Blanket impl: a shared reference to any bit set is itself a bit set,
// forwarding every query to the referent.
impl<'a, T> BitSetLike for &'a T
where
    T: BitSetLike + ?Sized,
{
    #[inline]
    fn layer3(&self) -> usize {
        (*self).layer3()
    }

    #[inline]
    fn layer2(&self, i: usize) -> usize {
        (*self).layer2(i)
    }

    #[inline]
    fn layer1(&self, i: usize) -> usize {
        (*self).layer1(i)
    }

    #[inline]
    fn layer0(&self, i: usize) -> usize {
        (*self).layer0(i)
    }

    #[inline]
    fn contains(&self, i: Index) -> bool {
        (*self).contains(i)
    }
}
499
// Blanket impl: a mutable reference also works as a read-only bit set,
// forwarding every query to the referent.
impl<'a, T> BitSetLike for &'a mut T
where
    T: BitSetLike + ?Sized,
{
    #[inline]
    fn layer3(&self) -> usize {
        (**self).layer3()
    }

    #[inline]
    fn layer2(&self, i: usize) -> usize {
        (**self).layer2(i)
    }

    #[inline]
    fn layer1(&self, i: usize) -> usize {
        (**self).layer1(i)
    }

    #[inline]
    fn layer0(&self, i: usize) -> usize {
        (**self).layer0(i)
    }

    #[inline]
    fn contains(&self, i: Index) -> bool {
        (**self).contains(i)
    }
}
529
// Only mutable references can drain, since `remove` needs `&mut self`.
impl<'a, T> DrainableBitSet for &'a mut T
where
    T: DrainableBitSet,
{
    #[inline]
    fn remove(&mut self, i: Index) -> bool {
        (**self).remove(i)
    }
}
539
540impl BitSetLike for BitSet {
541    #[inline]
542    fn layer3(&self) -> usize {
543        self.layer3
544    }
545
546    #[inline]
547    fn layer2(&self, i: usize) -> usize {
548        self.layer2.get(i).map(|&x| x).unwrap_or(0)
549    }
550
551    #[inline]
552    fn layer1(&self, i: usize) -> usize {
553        self.layer1.get(i).map(|&x| x).unwrap_or(0)
554    }
555
556    #[inline]
557    fn layer0(&self, i: usize) -> usize {
558        self.layer0.get(i).map(|&x| x).unwrap_or(0)
559    }
560
561    #[inline]
562    fn contains(&self, i: Index) -> bool {
563        self.contains(i)
564    }
565}
566
// Draining delegates to the inherent `BitSet::remove`.
impl DrainableBitSet for BitSet {
    #[inline]
    fn remove(&mut self, i: Index) -> bool {
        self.remove(i)
    }
}
573
574impl PartialEq for BitSet {
575    #[inline]
576    fn eq(&self, rhv: &BitSet) -> bool {
577        if self.layer3 != rhv.layer3 {
578            return false;
579        }
580        if self.layer2.len() != rhv.layer2.len()
581            || self.layer1.len() != rhv.layer1.len()
582            || self.layer0.len() != rhv.layer0.len()
583        {
584            return false;
585        }
586
587        for i in 0..self.layer2.len() {
588            if self.layer2(i) != rhv.layer2(i) {
589                return false;
590            }
591        }
592        for i in 0..self.layer1.len() {
593            if self.layer1(i) != rhv.layer1(i) {
594                return false;
595            }
596        }
597        for i in 0..self.layer0.len() {
598            if self.layer0(i) != rhv.layer0(i) {
599                return false;
600            }
601        }
602
603        true
604    }
605}
606impl Eq for BitSet {}
607
#[cfg(test)]
mod tests {
    use super::{BitSet, BitSetAnd, BitSetLike, BitSetNot};

    #[test]
    fn insert() {
        // `add` reports prior membership: false on first insert,
        // true when the bit was already present.
        let mut c = BitSet::new();
        for i in 0..1_000 {
            assert!(!c.add(i));
            assert!(c.add(i));
        }

        for i in 0..1_000 {
            assert!(c.contains(i));
        }
    }

    #[test]
    fn insert_100k() {
        // Same as `insert`, but large enough to span many words
        // in layers 0-2.
        let mut c = BitSet::new();
        for i in 0..100_000 {
            assert!(!c.add(i));
            assert!(c.add(i));
        }

        for i in 0..100_000 {
            assert!(c.contains(i));
        }
    }
    #[test]
    fn remove() {
        // Removing an absent bit must return false and leave the set unchanged.
        let mut c = BitSet::new();
        for i in 0..1_000 {
            assert!(!c.add(i));
        }

        for i in 0..1_000 {
            assert!(c.contains(i));
            assert!(c.remove(i));
            assert!(!c.contains(i));
            assert!(!c.remove(i));
        }
    }

    #[test]
    fn iter() {
        // A densely-filled set must iterate every index in ascending order.
        let mut c = BitSet::new();
        for i in 0..100_000 {
            c.add(i);
        }

        let mut count = 0;
        for (idx, i) in c.iter().enumerate() {
            count += 1;
            assert_eq!(idx, i as usize);
        }
        assert_eq!(count, 100_000);
    }

    #[test]
    fn iter_odd_even() {
        // Disjoint sets: their intersection must iterate nothing.
        let mut odd = BitSet::new();
        let mut even = BitSet::new();
        for i in 0..100_000 {
            if i % 2 == 1 {
                odd.add(i);
            } else {
                even.add(i);
            }
        }

        assert_eq!((&odd).iter().count(), 50_000);
        assert_eq!((&even).iter().count(), 50_000);
        assert_eq!(BitSetAnd(&odd, &even).iter().count(), 0);
    }

    #[test]
    fn iter_random_add() {
        // Sparse random fill: iteration count must equal the number of
        // *distinct* successful adds.
        use rand::prelude::*;

        let mut set = BitSet::new();
        let mut rng = thread_rng();
        let limit = 1_048_576;
        let mut added = 0;
        for _ in 0..(limit / 10) {
            let index = rng.gen_range(0, limit);
            if !set.add(index) {
                added += 1;
            }
        }
        assert_eq!(set.iter().count(), added as usize);
    }

    #[test]
    fn iter_clusters() {
        // Builds clusters spread across all three summary layers to
        // exercise the iterator's skipping logic.
        let mut set = BitSet::new();
        for x in 0..8 {
            let x = (x * 3) << (::BITS * 2); // scale to the last slot
            for y in 0..8 {
                let y = (y * 3) << (::BITS);
                for z in 0..8 {
                    let z = z * 2;
                    set.add(x + y + z);
                }
            }
        }
        assert_eq!(set.iter().count(), 8usize.pow(3));
    }

    #[test]
    fn not() {
        // `BitSetNot` of the odd numbers must yield the even numbers.
        let mut c = BitSet::new();
        for i in 0..10_000 {
            if i % 2 == 1 {
                c.add(i);
            }
        }
        let d = BitSetNot(c);
        for (idx, i) in d.iter().take(5_000).enumerate() {
            assert_eq!(idx * 2, i as usize);
        }
    }
}
731
#[cfg(all(test, feature = "parallel"))]
mod test_parallel {
    use super::{BitSet, BitSetAnd, BitSetLike};
    use rayon::iter::ParallelIterator;

    #[test]
    fn par_iter_one() {
        // Single-element sets at various positions (including the very
        // last valid index) must yield exactly one item in parallel.
        let step = 5000;
        let tests = 1_048_576 / step;
        for n in 0..tests {
            let n = n * step;
            let mut set = BitSet::new();
            set.add(n);
            assert_eq!(set.par_iter().count(), 1);
        }
        let mut set = BitSet::new();
        set.add(1_048_576 - 1);
        assert_eq!(set.par_iter().count(), 1);
    }

    #[test]
    fn par_iter_random_add() {
        use rand::prelude::*;
        use std::collections::HashSet;
        use std::sync::{Arc, Mutex};

        // Mirror the bit set with a HashSet, then check that the parallel
        // iterator yields each inserted index exactly once:
        // `check_set` tracks indices never yielded, `missing_set` tracks
        // yields that were never inserted.
        let mut set = BitSet::new();
        let mut check_set = HashSet::new();
        let mut rng = thread_rng();
        let limit = 1_048_576;
        for _ in 0..(limit / 10) {
            let index = rng.gen_range(0, limit);
            set.add(index);
            check_set.insert(index);
        }
        let check_set = Arc::new(Mutex::new(check_set));
        let missing_set = Arc::new(Mutex::new(HashSet::new()));
        set.par_iter().for_each(|n| {
            let check_set = check_set.clone();
            let missing_set = missing_set.clone();
            let mut check = check_set.lock().unwrap();
            if !check.remove(&n) {
                let mut missing = missing_set.lock().unwrap();
                missing.insert(n);
            }
        });
        let check_set = check_set.lock().unwrap();
        let missing_set = missing_set.lock().unwrap();
        if !check_set.is_empty() && !missing_set.is_empty() {
            panic!(
                "There were values that didn't get iterated: {:?}
            There were values that got iterated, but that shouldn't be: {:?}",
                *check_set, *missing_set
            );
        }
        if !check_set.is_empty() {
            panic!(
                "There were values that didn't get iterated: {:?}",
                *check_set
            );
        }
        if !missing_set.is_empty() {
            panic!(
                "There were values that got iterated, but that shouldn't be: {:?}",
                *missing_set
            );
        }
    }

    #[test]
    fn par_iter_odd_even() {
        // Parallel analogue of `tests::iter_odd_even`.
        let mut odd = BitSet::new();
        let mut even = BitSet::new();
        for i in 0..100_000 {
            if i % 2 == 1 {
                odd.add(i);
            } else {
                even.add(i);
            }
        }

        assert_eq!((&odd).par_iter().count(), 50_000);
        assert_eq!((&even).par_iter().count(), 50_000);
        assert_eq!(BitSetAnd(&odd, &even).par_iter().count(), 0);
    }

    #[test]
    fn par_iter_clusters() {
        use std::collections::HashSet;
        use std::sync::{Arc, Mutex};
        // Same cluster layout as `tests::iter_clusters`, verified with the
        // exact-once accounting used by `par_iter_random_add`.
        let mut set = BitSet::new();
        let mut check_set = HashSet::new();
        for x in 0..8 {
            let x = (x * 3) << (::BITS * 2); // scale to the last slot
            for y in 0..8 {
                let y = (y * 3) << (::BITS);
                for z in 0..8 {
                    let z = z * 2;
                    let index = x + y + z;
                    set.add(index);
                    check_set.insert(index);
                }
            }
        }
        let check_set = Arc::new(Mutex::new(check_set));
        let missing_set = Arc::new(Mutex::new(HashSet::new()));
        set.par_iter().for_each(|n| {
            let check_set = check_set.clone();
            let missing_set = missing_set.clone();
            let mut check = check_set.lock().unwrap();
            if !check.remove(&n) {
                let mut missing = missing_set.lock().unwrap();
                missing.insert(n);
            }
        });
        let check_set = check_set.lock().unwrap();
        let missing_set = missing_set.lock().unwrap();
        if !check_set.is_empty() && !missing_set.is_empty() {
            panic!(
                "There were values that didn't get iterated: {:?}
            There were values that got iterated, but that shouldn't be: {:?}",
                *check_set, *missing_set
            );
        }
        if !check_set.is_empty() {
            panic!(
                "There were values that didn't get iterated: {:?}",
                *check_set
            );
        }
        if !missing_set.is_empty() {
            panic!(
                "There were values that got iterated, but that shouldn't be: {:?}",
                *missing_set
            );
        }
    }
}