RPM build fix (reverted CI changes, which will need to be un-reverted or made conditional) and vendor Rust dependencies to make builds much faster in any CI system.

Adam Ierymenko
2022-06-08 07:32:16 -04:00
parent 373ca30269
commit d5ca4e5f52
12611 changed files with 2898014 additions and 284 deletions


@@ -0,0 +1,4 @@
#[cfg(feature = "rayon")]
pub(crate) mod rayon;
#[cfg(feature = "serde")]
mod serde;


@@ -0,0 +1,26 @@
use alloc::collections::LinkedList;
use alloc::vec::Vec;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Helper for collecting parallel iterators to an intermediary
pub(super) fn collect<I: IntoParallelIterator>(iter: I) -> (LinkedList<Vec<I::Item>>, usize) {
let list = iter
.into_par_iter()
.fold(Vec::new, |mut vec, elem| {
vec.push(elem);
vec
})
.map(|vec| {
let mut list = LinkedList::new();
list.push_back(vec);
list
})
.reduce(LinkedList::new, |mut list1, mut list2| {
list1.append(&mut list2);
list1
});
let len = list.iter().map(Vec::len).sum();
(list, len)
}
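As an editorial aside (not part of the vendored file): a minimal, self-contained sketch of the same fold/map/reduce pattern written against rayon directly rather than this private helper. The closure bodies mirror the code above; the linked list gives O(1) pairwise appends during the reduce step.

use rayon::prelude::*;
use std::collections::LinkedList;

fn main() {
    // Each worker folds items into its own Vec; each Vec is wrapped in a
    // one-element LinkedList, and lists are appended pairwise during reduce.
    let list: LinkedList<Vec<u32>> = (0..1_000u32)
        .into_par_iter()
        .fold(Vec::new, |mut v, x| {
            v.push(x);
            v
        })
        .map(|v| {
            let mut l = LinkedList::new();
            l.push_back(v);
            l
        })
        .reduce(LinkedList::new, |mut a, mut b| {
            a.append(&mut b);
            a
        });
    let len: usize = list.iter().map(Vec::len).sum();
    assert_eq!(len, 1_000);
}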


@@ -0,0 +1,734 @@
//! Rayon extensions for `HashMap`.
use super::raw::{RawIntoParIter, RawParDrain, RawParIter};
use crate::hash_map::HashMap;
use crate::raw::{Allocator, Global};
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over shared references to entries in a map.
///
/// This iterator is created by the [`par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a V)>,
}
impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
type Item = (&'a K, &'a V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe {
let r = x.as_ref();
(&r.0, &r.1)
})
.drive_unindexed(consumer)
}
}
impl<K, V> Clone for ParIter<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
marker: PhantomData,
}
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug> fmt::Debug for ParIter<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let iter = unsafe { self.inner.iter() }.map(|x| unsafe {
let r = x.as_ref();
(&r.0, &r.1)
});
f.debug_list().entries(iter).finish()
}
}
/// Parallel iterator over shared references to keys in a map.
///
/// This iterator is created by the [`par_keys`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParKeys<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a V)>,
}
impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> {
type Item = &'a K;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { &x.as_ref().0 })
.drive_unindexed(consumer)
}
}
impl<K, V> Clone for ParKeys<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
marker: PhantomData,
}
}
}
impl<K: fmt::Debug + Eq + Hash, V> fmt::Debug for ParKeys<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().0 });
f.debug_list().entries(iter).finish()
}
}
/// Parallel iterator over shared references to values in a map.
///
/// This iterator is created by the [`par_values`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValues<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a V)>,
}
impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> {
type Item = &'a V;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { &x.as_ref().1 })
.drive_unindexed(consumer)
}
}
impl<K, V> Clone for ParValues<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
marker: PhantomData,
}
}
}
impl<K: Eq + Hash, V: fmt::Debug> fmt::Debug for ParValues<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().1 });
f.debug_list().entries(iter).finish()
}
}
/// Parallel iterator over mutable references to entries in a map.
///
/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefMutIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html
pub struct ParIterMut<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a mut V)>,
}
impl<'a, K: Sync, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe {
let r = x.as_mut();
(&r.0, &mut r.1)
})
.drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug> fmt::Debug for ParIterMut<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: self.inner.clone(),
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel iterator over mutable references to values in a map.
///
/// This iterator is created by the [`par_values_mut`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValuesMut<'a, K, V> {
inner: RawParIter<(K, V)>,
marker: PhantomData<(&'a K, &'a mut V)>,
}
impl<'a, K: Sync, V: Send> ParallelIterator for ParValuesMut<'a, K, V> {
type Item = &'a mut V;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner
.map(|x| unsafe { &mut x.as_mut().1 })
.drive_unindexed(consumer)
}
}
impl<K: Eq + Hash, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParValues {
inner: self.inner.clone(),
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel iterator over entries of a consumed map.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<K, V, A: Allocator + Clone = Global> {
inner: RawIntoParIter<(K, V), A>,
}
impl<K: Send, V: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug
for IntoParIter<K, V, A>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
marker: PhantomData,
}
.fmt(f)
}
}
/// Parallel draining iterator over entries of a map.
///
/// This iterator is created by the [`par_drain`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> {
inner: RawParDrain<'a, (K, V), A>,
}
impl<K: Send, V: Send, A: Allocator + Clone + Sync> ParallelIterator for ParDrain<'_, K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug
for ParDrain<'_, K, V, A>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
marker: PhantomData,
}
.fmt(f)
}
}
impl<K: Sync, V: Sync, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_keys(&self) -> ParKeys<'_, K, V> {
ParKeys {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
/// Visits (potentially in parallel) immutably borrowed values in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_values(&self) -> ParValues<'_, K, V> {
ParValues {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
}
impl<K: Send, V: Send, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// Visits (potentially in parallel) mutably borrowed values in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
ParValuesMut {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the map's allocated memory for reuse.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> ParDrain<'_, K, V, A> {
ParDrain {
inner: self.table.par_drain(),
}
}
}
impl<K, V, S, A> HashMap<K, V, S, A>
where
K: Eq + Hash + Sync,
V: PartialEq + Sync,
S: BuildHasher + Sync,
A: Allocator + Clone + Sync,
{
/// Returns `true` if the map is equal to another,
/// i.e. both maps contain the same keys mapped to the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len()
&& self
.into_par_iter()
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
}
}
impl<K: Send, V: Send, S, A: Allocator + Clone + Send> IntoParallelIterator
for HashMap<K, V, S, A>
{
type Item = (K, V);
type Iter = IntoParIter<K, V, A>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
IntoParIter {
inner: self.table.into_par_iter(),
}
}
}
impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator
for &'a HashMap<K, V, S, A>
{
type Item = (&'a K, &'a V);
type Iter = ParIter<'a, K, V>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIter {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
}
impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator
for &'a mut HashMap<K, V, S, A>
{
type Item = (&'a K, &'a mut V);
type Iter = ParIterMut<'a, K, V>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIterMut {
inner: unsafe { self.table.par_iter() },
marker: PhantomData,
}
}
}
/// Collect (key, value) pairs from a parallel iterator into a
/// hashmap. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S, Global>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = (K, V)>,
{
let mut map = HashMap::default();
map.par_extend(par_iter);
map
}
}
/// Extend a hash map with items from a parallel iterator.
impl<K, V, S, A> ParallelExtend<(K, V)> for HashMap<K, V, S, A>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher,
A: Allocator + Clone,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
extend(self, par_iter);
}
}
/// Extend a hash map with copied items from a parallel iterator.
impl<'a, K, V, S, A> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S, A>
where
K: Copy + Eq + Hash + Sync,
V: Copy + Sync,
S: BuildHasher,
A: Allocator + Clone,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend(self, par_iter);
}
}
// This is equal to the normal `HashMap` -- no custom advantage.
fn extend<K, V, S, A, I>(map: &mut HashMap<K, V, S, A>, par_iter: I)
where
K: Eq + Hash,
S: BuildHasher,
I: IntoParallelIterator,
A: Allocator + Clone,
HashMap<K, V, S, A>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Keys may be already present or show multiple times in the iterator.
// Reserve the entire length if the map is empty.
// Otherwise reserve half the length (rounded up), so the map
// will only resize twice in the worst case.
let reserve = if map.is_empty() { len } else { (len + 1) / 2 };
map.reserve(reserve);
for vec in list {
map.extend(vec);
}
}
#[cfg(test)]
mod test_par_map {
use alloc::vec::Vec;
use core::hash::{Hash, Hasher};
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use crate::hash_map::HashMap;
struct Dropable<'a> {
k: usize,
counter: &'a AtomicUsize,
}
impl Dropable<'_> {
fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> {
counter.fetch_add(1, Ordering::Relaxed);
Dropable { k, counter }
}
}
impl Drop for Dropable<'_> {
fn drop(&mut self) {
self.counter.fetch_sub(1, Ordering::Relaxed);
}
}
impl Clone for Dropable<'_> {
fn clone(&self) -> Self {
Dropable::new(self.k, self.counter)
}
}
impl Hash for Dropable<'_> {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.k.hash(state)
}
}
impl PartialEq for Dropable<'_> {
fn eq(&self, other: &Self) -> bool {
self.k == other.k
}
}
impl Eq for Dropable<'_> {}
#[test]
fn test_into_iter_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Dropable::new(i, &key);
let d2 = Dropable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the iterator does not leak anything.
drop(hm.clone().into_par_iter());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm
.into_par_iter()
.filter(|&(ref key, _)| key.k < 50)
.collect();
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_drain_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let mut hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Dropable::new(i, &key);
let d2 = Dropable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the drain iterator does not leak anything.
drop(hm.clone().par_drain());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect();
assert!(hm.is_empty());
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_empty_iter() {
let mut m: HashMap<isize, bool> = HashMap::new();
assert_eq!(m.par_drain().count(), 0);
assert_eq!(m.par_keys().count(), 0);
assert_eq!(m.par_values().count(), 0);
assert_eq!(m.par_values_mut().count(), 0);
assert_eq!(m.par_iter().count(), 0);
assert_eq!(m.par_iter_mut().count(), 0);
assert_eq!(m.len(), 0);
assert!(m.is_empty());
assert_eq!(m.into_par_iter().count(), 0);
}
#[test]
fn test_iterate() {
let mut m = HashMap::with_capacity(4);
for i in 0..32 {
assert!(m.insert(i, i * 2).is_none());
}
assert_eq!(m.len(), 32);
let observed = AtomicUsize::new(0);
m.par_iter().for_each(|(k, v)| {
assert_eq!(*v, *k * 2);
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_keys() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let keys: Vec<_> = map.par_keys().cloned().collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
assert!(keys.contains(&3));
}
#[test]
fn test_values() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
assert!(values.contains(&'b'));
assert!(values.contains(&'c'));
}
#[test]
fn test_values_mut() {
let vec = vec![(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = vec.into_par_iter().collect();
map.par_values_mut().for_each(|value| *value = (*value) * 2);
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
assert!(values.contains(&4));
assert!(values.contains(&6));
}
#[test]
fn test_eq() {
let mut m1 = HashMap::new();
m1.insert(1, 2);
m1.insert(2, 3);
m1.insert(3, 4);
let mut m2 = HashMap::new();
m2.insert(1, 2);
m2.insert(2, 3);
assert!(!m1.par_eq(&m2));
m2.insert(3, 4);
assert!(m1.par_eq(&m2));
}
#[test]
fn test_from_iter() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let map: HashMap<_, _> = xs.par_iter().cloned().collect();
for &(k, v) in &xs {
assert_eq!(map.get(&k), Some(&v));
}
}
#[test]
fn test_extend_ref() {
let mut a = HashMap::new();
a.insert(1, "one");
let mut b = HashMap::new();
b.insert(2, "two");
b.insert(3, "three");
a.par_extend(&b);
assert_eq!(a.len(), 3);
assert_eq!(a[&1], "one");
assert_eq!(a[&2], "two");
assert_eq!(a[&3], "three");
}
}
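A brief usage sketch of the map APIs above (editorial illustration, not part of the vendored file; it assumes hashbrown is built with its `rayon` feature and that rayon is a dependency). Collecting pairs goes through `FromParallelIterator`, and `par_extend` funnels through the `collect`/`reserve`/`extend` path shown above; per the documentation above, a later pair with a duplicate key overwrites an earlier one.

use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    // Duplicate key: the later (1, "c") wins, per the FromParallelIterator docs above.
    let pairs = vec![(1, "a"), (2, "b"), (1, "c")];
    let map: HashMap<_, _> = pairs.into_par_iter().collect();
    assert_eq!(map.get(&1), Some(&"c"));
    assert_eq!(map.get(&2), Some(&"b"));

    // par_extend on an existing map reserves roughly half the incoming length
    // up front (see `extend` above) and then extends from the per-thread Vecs.
    let mut squares: HashMap<u32, u32> = HashMap::new();
    squares.insert(0, 0);
    squares.par_extend((1..1_000u32).into_par_iter().map(|i| (i, i * i)));
    assert_eq!(squares.len(), 1_000);
    assert_eq!(squares.get(&10), Some(&100));
}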


@@ -0,0 +1,4 @@
mod helpers;
pub(crate) mod map;
pub(crate) mod raw;
pub(crate) mod set;


@@ -0,0 +1,229 @@
use crate::raw::Bucket;
use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable};
use crate::scopeguard::guard;
use alloc::alloc::dealloc;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use rayon::iter::{
plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer},
ParallelIterator,
};
/// Parallel iterator which returns a raw pointer to every full bucket in the table.
pub struct RawParIter<T> {
iter: RawIterRange<T>,
}
impl<T> RawParIter<T> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn iter(&self) -> RawIterRange<T> {
self.iter.clone()
}
}
impl<T> Clone for RawParIter<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
}
}
}
impl<T> From<RawIter<T>> for RawParIter<T> {
fn from(it: RawIter<T>) -> Self {
RawParIter { iter: it.iter }
}
}
impl<T> ParallelIterator for RawParIter<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = ParIterProducer { iter: self.iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Producer which returns a `Bucket<T>` for every element.
struct ParIterProducer<T> {
iter: RawIterRange<T>,
}
impl<T> UnindexedProducer for ParIterProducer<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.split();
let left = ParIterProducer { iter: left };
let right = right.map(|right| ParIterProducer { iter: right });
(left, right)
}
#[cfg_attr(feature = "inline-more", inline)]
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.iter)
}
}
/// Parallel iterator which consumes a table and returns elements.
pub struct RawIntoParIter<T, A: Allocator + Clone = Global> {
table: RawTable<T, A>,
}
impl<T, A: Allocator + Clone> RawIntoParIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn par_iter(&self) -> RawParIter<T> {
self.table.par_iter()
}
}
impl<T: Send, A: Allocator + Clone> ParallelIterator for RawIntoParIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let iter = unsafe { self.table.iter().iter };
let _guard = guard(self.table.into_allocation(), |alloc| {
if let Some((ptr, layout)) = *alloc {
unsafe {
dealloc(ptr.as_ptr(), layout);
}
}
});
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Parallel iterator which consumes elements without freeing the table storage.
pub struct RawParDrain<'a, T, A: Allocator + Clone = Global> {
// We don't use a &'a mut RawTable<T> because we want RawParDrain to be
// covariant over T.
table: NonNull<RawTable<T, A>>,
marker: PhantomData<&'a RawTable<T, A>>,
}
unsafe impl<T, A: Allocator + Clone> Send for RawParDrain<'_, T, A> {}
impl<T, A: Allocator + Clone> RawParDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn par_iter(&self) -> RawParIter<T> {
self.table.as_ref().par_iter()
}
}
impl<T: Send, A: Allocator + Clone> ParallelIterator for RawParDrain<'_, T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let _guard = guard(self.table, |table| unsafe {
table.as_mut().clear_no_drop()
});
let iter = unsafe { self.table.as_ref().iter().iter };
mem::forget(self);
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
impl<T, A: Allocator + Clone> Drop for RawParDrain<'_, T, A> {
fn drop(&mut self) {
// If drive_unindexed is not called then simply clear the table.
unsafe { self.table.as_mut().clear() }
}
}
/// Producer which will consume all elements in the range, even if it is dropped
/// halfway through.
struct ParDrainProducer<T> {
iter: RawIterRange<T>,
}
impl<T: Send> UnindexedProducer for ParDrainProducer<T> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.clone().split();
mem::forget(self);
let left = ParDrainProducer { iter: left };
let right = right.map(|right| ParDrainProducer { iter: right });
(left, right)
}
#[cfg_attr(feature = "inline-more", inline)]
fn fold_with<F>(mut self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// Make sure to modify the iterator in-place so that any remaining
// elements are processed in our Drop impl.
while let Some(item) = self.iter.next() {
folder = folder.consume(unsafe { item.read() });
if folder.full() {
return folder;
}
}
// If we processed all elements then we don't need to run the drop.
mem::forget(self);
folder
}
}
impl<T> Drop for ParDrainProducer<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
// Drop all remaining elements
if mem::needs_drop::<T>() {
while let Some(item) = self.iter.next() {
unsafe {
item.drop();
}
}
}
}
}
impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Returns a parallel iterator over the elements in a `RawTable`.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn par_iter(&self) -> RawParIter<T> {
RawParIter {
iter: self.iter().iter,
}
}
/// Returns a parallel iterator over the elements in a `RawTable`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_par_iter(self) -> RawIntoParIter<T, A> {
RawIntoParIter { table: self }
}
/// Returns a parallel iterator which consumes all elements of a `RawTable`
/// without freeing its memory allocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> RawParDrain<'_, T, A> {
RawParDrain {
table: NonNull::from(self),
marker: PhantomData,
}
}
}
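An editorial sketch (not part of the vendored file) that exercises the drain machinery above through the safe `HashMap::par_drain` wrapper; as documented for `RawParDrain`, the elements are consumed but the table's storage is kept for reuse.

use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    let mut map: HashMap<u32, u32> = (0..100u32).map(|i| (i, i)).collect();
    let capacity_before = map.capacity();

    // Consume every entry in parallel; ParDrainProducer drops any items a
    // consumer did not take, and the table is cleared without being freed.
    let sum: u32 = map.par_drain().map(|(_, v)| v).sum();
    assert_eq!(sum, (0..100u32).sum::<u32>());
    assert!(map.is_empty());

    // The allocation is retained for reuse.
    assert!(map.capacity() >= capacity_before);
}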


@@ -0,0 +1,659 @@
//! Rayon extensions for `HashSet`.
use super::map;
use crate::hash_set::HashSet;
use crate::raw::{Allocator, Global};
use core::hash::{BuildHasher, Hash};
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over elements of a consumed set.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<T, A: Allocator + Clone = Global> {
inner: map::IntoParIter<T, (), A>,
}
impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<T, A> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.map(|(k, _)| k).drive_unindexed(consumer)
}
}
/// Parallel draining iterator over entries of a set.
///
/// This iterator is created by the [`par_drain`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDrain<'a, T, A: Allocator + Clone = Global> {
inner: map::ParDrain<'a, T, (), A>,
}
impl<T: Send, A: Allocator + Clone + Send + Sync> ParallelIterator for ParDrain<'_, T, A> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.map(|(k, _)| k).drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in a set.
///
/// This iterator is created by the [`par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, T> {
inner: map::ParKeys<'a, T, ()>,
}
impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.inner.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the difference of
/// sets.
///
/// This iterator is created by the [`par_difference`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Clone + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| !self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the symmetric
/// difference of sets.
///
/// This iterator is created by the [`par_symmetric_difference`] method on
/// [`HashSet`].
/// See its documentation for more.
///
/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Clone + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.par_difference(self.b)
.chain(self.b.par_difference(self.a))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the intersection of
/// sets.
///
/// This iterator is created by the [`par_intersection`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Clone + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the union of sets.
///
/// This iterator is created by the [`par_union`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Clone + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
// We'll iterate one set in full, and only the remaining difference from the other.
// Use the smaller set for the difference in order to reduce hash lookups.
let (smaller, larger) = if self.a.len() <= self.b.len() {
(self.a, self.b)
} else {
(self.b, self.a)
};
larger
.into_par_iter()
.chain(smaller.par_difference(larger))
.drive_unindexed(consumer)
}
}
impl<T, S, A> HashSet<T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
A: Allocator + Clone + Sync,
{
/// Visits (potentially in parallel) the values representing the union,
/// i.e. all the values in `self` or `other`, without duplicates.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S, A> {
ParUnion { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the difference,
/// i.e. the values that are in `self` but not in `other`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S, A> {
ParDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the symmetric
/// difference, i.e. the values that are in `self` or in `other` but not in both.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_symmetric_difference<'a>(
&'a self,
other: &'a Self,
) -> ParSymmetricDifference<'a, T, S, A> {
ParSymmetricDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the
/// intersection, i.e. the values that are both in `self` and `other`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S, A> {
ParIntersection { a: self, b: other }
}
/// Returns `true` if `self` has no elements in common with `other`.
/// This is equivalent to checking for an empty intersection.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_disjoint(&self, other: &Self) -> bool {
self.into_par_iter().all(|x| !other.contains(x))
}
/// Returns `true` if the set is a subset of another,
/// i.e. `other` contains at least all the values in `self`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_subset(&self, other: &Self) -> bool {
if self.len() <= other.len() {
self.into_par_iter().all(|x| other.contains(x))
} else {
false
}
}
/// Returns `true` if the set is a superset of another,
/// i.e. `self` contains at least all the values in `other`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_superset(&self, other: &Self) -> bool {
other.par_is_subset(self)
}
/// Returns `true` if the set is equal to another,
/// i.e. both sets contain the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.par_is_subset(other)
}
}
impl<T, S, A> HashSet<T, S, A>
where
T: Eq + Hash + Send,
A: Allocator + Clone + Send,
{
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the set's allocated memory for reuse.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> ParDrain<'_, T, A> {
ParDrain {
inner: self.map.par_drain(),
}
}
}
impl<T: Send, S, A: Allocator + Clone + Send> IntoParallelIterator for HashSet<T, S, A> {
type Item = T;
type Iter = IntoParIter<T, A>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
IntoParIter {
inner: self.map.into_par_iter(),
}
}
}
impl<'a, T: Sync, S, A: Allocator + Clone> IntoParallelIterator for &'a HashSet<T, S, A> {
type Item = &'a T;
type Iter = ParIter<'a, T>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIter {
inner: self.map.par_keys(),
}
}
}
/// Collect values from a parallel iterator into a hashset.
impl<T, S> FromParallelIterator<T> for HashSet<T, S, Global>
where
T: Eq + Hash + Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = T>,
{
let mut set = HashSet::default();
set.par_extend(par_iter);
set
}
}
/// Extend a hash set with items from a parallel iterator.
impl<T, S> ParallelExtend<T> for HashSet<T, S, Global>
where
T: Eq + Hash + Send,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend(self, par_iter);
}
}
/// Extend a hash set with copied items from a parallel iterator.
impl<'a, T, S> ParallelExtend<&'a T> for HashSet<T, S, Global>
where
T: 'a + Copy + Eq + Hash + Sync,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend(self, par_iter);
}
}
// This is equal to the normal `HashSet` -- no custom advantage.
fn extend<T, S, I, A>(set: &mut HashSet<T, S, A>, par_iter: I)
where
T: Eq + Hash,
S: BuildHasher,
A: Allocator + Clone,
I: IntoParallelIterator,
HashSet<T, S, A>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Values may be already present or show multiple times in the iterator.
// Reserve the entire length if the set is empty.
// Otherwise reserve half the length (rounded up), so the set
// will only resize twice in the worst case.
let reserve = if set.is_empty() { len } else { (len + 1) / 2 };
set.reserve(reserve);
for vec in list {
set.extend(vec);
}
}
#[cfg(test)]
mod test_par_set {
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use crate::hash_set::HashSet;
#[test]
fn test_disjoint() {
let mut xs = HashSet::new();
let mut ys = HashSet::new();
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(5));
assert!(ys.insert(11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(7));
assert!(xs.insert(19));
assert!(xs.insert(4));
assert!(ys.insert(2));
assert!(ys.insert(-11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(ys.insert(7));
assert!(!xs.par_is_disjoint(&ys));
assert!(!ys.par_is_disjoint(&xs));
}
#[test]
fn test_subset_and_superset() {
let mut a = HashSet::new();
assert!(a.insert(0));
assert!(a.insert(5));
assert!(a.insert(11));
assert!(a.insert(7));
let mut b = HashSet::new();
assert!(b.insert(0));
assert!(b.insert(7));
assert!(b.insert(19));
assert!(b.insert(250));
assert!(b.insert(11));
assert!(b.insert(200));
assert!(!a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(!b.par_is_superset(&a));
assert!(b.insert(5));
assert!(a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(b.par_is_superset(&a));
}
#[test]
fn test_iterate() {
let mut a = HashSet::new();
for i in 0..32 {
assert!(a.insert(i));
}
let observed = AtomicUsize::new(0);
a.par_iter().for_each(|k| {
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_intersection() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(11));
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(77));
assert!(a.insert(103));
assert!(a.insert(5));
assert!(a.insert(-5));
assert!(b.insert(2));
assert!(b.insert(11));
assert!(b.insert(77));
assert!(b.insert(-9));
assert!(b.insert(-42));
assert!(b.insert(5));
assert!(b.insert(3));
let expected = [3, 5, 11, 77];
let i = a
.par_intersection(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(3));
assert!(b.insert(9));
let expected = [1, 5, 11];
let i = a
.par_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_symmetric_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(-2));
assert!(b.insert(3));
assert!(b.insert(9));
assert!(b.insert(14));
assert!(b.insert(22));
let expected = [-2, 1, 5, 11, 14, 22];
let i = a
.par_symmetric_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_union() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(a.insert(16));
assert!(a.insert(19));
assert!(a.insert(24));
assert!(b.insert(-2));
assert!(b.insert(1));
assert!(b.insert(5));
assert!(b.insert(9));
assert!(b.insert(13));
assert!(b.insert(19));
let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
let i = a
.par_union(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_from_iter() {
let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
let set: HashSet<_> = xs.par_iter().cloned().collect();
for x in &xs {
assert!(set.contains(x));
}
}
#[test]
fn test_move_iter() {
let hs = {
let mut hs = HashSet::new();
hs.insert('a');
hs.insert('b');
hs
};
let v = hs.into_par_iter().collect::<Vec<char>>();
assert!(v == ['a', 'b'] || v == ['b', 'a']);
}
#[test]
fn test_eq() {
// These constants once happened to expose a bug in insert().
// I'm keeping them around to prevent a regression.
let mut s1 = HashSet::new();
s1.insert(1);
s1.insert(2);
s1.insert(3);
let mut s2 = HashSet::new();
s2.insert(1);
s2.insert(2);
assert!(!s1.par_eq(&s2));
s2.insert(3);
assert!(s1.par_eq(&s2));
}
#[test]
fn test_extend_ref() {
let mut a = HashSet::new();
a.insert(1);
a.par_extend(&[2, 3, 4][..]);
assert_eq!(a.len(), 4);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
let mut b = HashSet::new();
b.insert(5);
b.insert(6);
a.par_extend(&b);
assert_eq!(a.len(), 6);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
assert!(a.contains(&5));
assert!(a.contains(&6));
}
}
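A short usage sketch of the set operations above (editorial illustration, not part of the vendored file; it assumes the `rayon` feature). Each `par_*` method returns a lazy parallel iterator borrowing both sets.

use hashbrown::HashSet;
use rayon::prelude::*;

fn main() {
    let a: HashSet<i32> = (1..=5).collect();
    let b: HashSet<i32> = (4..=8).collect();

    // Each of these is a ParallelIterator built from the two borrowed sets.
    let union: HashSet<i32> = a.par_union(&b).cloned().collect();
    let inter: HashSet<i32> = a.par_intersection(&b).cloned().collect();
    let diff: HashSet<i32> = a.par_difference(&b).cloned().collect();

    assert_eq!(union.len(), 8); // 1..=8
    assert_eq!(inter.len(), 2); // 4, 5
    assert_eq!(diff.len(), 3); // 1, 2, 3
    assert!(a.par_is_subset(&union));
    assert!(!a.par_is_disjoint(&b));
}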


@@ -0,0 +1,200 @@
mod size_hint {
use core::cmp;
/// This presumably exists to prevent denial of service attacks.
///
/// Original discussion: https://github.com/serde-rs/serde/issues/1114.
#[cfg_attr(feature = "inline-more", inline)]
pub(super) fn cautious(hint: Option<usize>) -> usize {
cmp::min(hint.unwrap_or(0), 4096)
}
}
mod map {
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use crate::hash_map::HashMap;
use super::size_hint;
impl<K, V, H> Serialize for HashMap<K, V, H>
where
K: Serialize + Eq + Hash,
V: Serialize,
H: BuildHasher,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_map(self)
}
}
impl<'de, K, V, S> Deserialize<'de> for HashMap<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct MapVisitor<K, V, S> {
marker: PhantomData<HashMap<K, V, S>>,
}
impl<'de, K, V, S> Visitor<'de> for MapVisitor<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
{
type Value = HashMap<K, V, S>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut values = HashMap::with_capacity_and_hasher(
size_hint::cautious(map.size_hint()),
S::default(),
);
while let Some((key, value)) = map.next_entry()? {
values.insert(key, value);
}
Ok(values)
}
}
let visitor = MapVisitor {
marker: PhantomData,
};
deserializer.deserialize_map(visitor)
}
}
}
mod set {
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use crate::hash_set::HashSet;
use super::size_hint;
impl<T, H> Serialize for HashSet<T, H>
where
T: Serialize + Eq + Hash,
H: BuildHasher,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_seq(self)
}
}
impl<'de, T, S> Deserialize<'de> for HashSet<T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct SeqVisitor<T, S> {
marker: PhantomData<HashSet<T, S>>,
}
impl<'de, T, S> Visitor<'de> for SeqVisitor<T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
type Value = HashSet<T, S>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = HashSet::with_capacity_and_hasher(
size_hint::cautious(seq.size_hint()),
S::default(),
);
while let Some(value) = seq.next_element()? {
values.insert(value);
}
Ok(values)
}
}
let visitor = SeqVisitor {
marker: PhantomData,
};
deserializer.deserialize_seq(visitor)
}
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
where
D: Deserializer<'de>,
{
struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet<T, S>);
impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
self.0.clear();
self.0.reserve(size_hint::cautious(seq.size_hint()));
while let Some(value) = seq.next_element()? {
self.0.insert(value);
}
Ok(())
}
}
deserializer.deserialize_seq(SeqInPlaceVisitor(place))
}
}
}
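A round-trip sketch for the impls above (editorial illustration, not part of the vendored file; it assumes hashbrown's `serde` feature plus `serde_json` as an extra dependency).

use hashbrown::{HashMap, HashSet};

fn main() -> Result<(), serde_json::Error> {
    let mut scores: HashMap<String, u32> = HashMap::new();
    scores.insert("alice".to_string(), 3);

    // Serialize goes through collect_map / collect_seq; Deserialize builds the
    // collection with a capacity capped by size_hint::cautious (at most 4096).
    let json = serde_json::to_string(&scores)?;
    let back: HashMap<String, u32> = serde_json::from_str(&json)?;
    assert_eq!(back.get("alice"), Some(&3));

    let tags: HashSet<String> = serde_json::from_str(r#"["a", "b", "a"]"#)?;
    assert_eq!(tags.len(), 2);
    Ok(())
}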

zeroidc/vendor/hashbrown/src/lib.rs (vendored, new file, 161 lines)

@@ -0,0 +1,161 @@
//! This crate is a Rust port of Google's high-performance [SwissTable] hash
//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
//! and `HashSet` types.
//!
//! The original C++ version of [SwissTable] can be found [here], and this
//! [CppCon talk] gives an overview of how the algorithm works.
//!
//! [SwissTable]: https://abseil.io/blog/20180927-swisstables
//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
//! [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
#![no_std]
#![cfg_attr(
feature = "nightly",
feature(
test,
core_intrinsics,
dropck_eyepatch,
min_specialization,
extend_one,
allocator_api,
slice_ptr_get,
nonnull_slice_from_raw_parts,
maybe_uninit_array_assume_init
)
)]
#![allow(
clippy::doc_markdown,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::option_if_let_else,
clippy::redundant_else,
clippy::manual_map
)]
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg_attr(test, macro_use)]
extern crate alloc;
#[cfg(feature = "nightly")]
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
#[macro_use]
mod macros;
#[cfg(feature = "raw")]
/// Experimental and unsafe `RawTable` API. This module is only available if the
/// `raw` feature is enabled.
pub mod raw {
// The RawTable API is still experimental and is not properly documented yet.
#[allow(missing_docs)]
#[path = "mod.rs"]
mod inner;
pub use inner::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash maps.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::raw::*;
}
}
#[cfg(not(feature = "raw"))]
mod raw;
mod external_trait_impls;
mod map;
#[cfg(feature = "rustc-internal-api")]
mod rustc_entry;
mod scopeguard;
mod set;
pub mod hash_map {
//! A hash map implemented with quadratic probing and SIMD lookup.
pub use crate::map::*;
#[cfg(feature = "rustc-internal-api")]
pub use crate::rustc_entry::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash maps.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::map::*;
}
}
pub mod hash_set {
//! A hash set implemented as a `HashMap` where the value is `()`.
pub use crate::set::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash sets.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::set::*;
}
}
pub use crate::map::HashMap;
pub use crate::set::HashSet;
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TryReserveError {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// The memory allocator returned an error
AllocError {
/// The layout of the allocation request that failed.
layout: alloc::alloc::Layout,
},
}
/// The error type for [`RawTable::get_each_mut`](crate::raw::RawTable::get_each_mut),
/// [`HashMap::get_each_mut`], and [`HashMap::get_each_key_value_mut`].
#[cfg(feature = "nightly")]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum UnavailableMutError {
/// The requested entry is not present in the table.
Absent,
/// The requested entry is present, but a mutable reference to it was already created and
/// returned from this call to `get_each_mut` or `get_each_key_value_mut`.
///
/// Includes the index of the existing mutable reference in the returned array.
Duplicate(usize),
}
/// Wrapper around `Bump` which allows it to be used as an allocator for
/// `HashMap`, `HashSet` and `RawTable`.
///
/// `Bump` can be used directly without this wrapper on nightly if you enable
/// the `allocator-api` feature of the `bumpalo` crate.
#[cfg(feature = "bumpalo")]
#[derive(Clone, Copy, Debug)]
pub struct BumpWrapper<'a>(pub &'a bumpalo::Bump);
#[cfg(feature = "bumpalo")]
#[test]
fn test_bumpalo() {
use bumpalo::Bump;
let bump = Bump::new();
let mut map = HashMap::new_in(BumpWrapper(&bump));
map.insert(0, 1);
}
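An editorial sketch of the fallible-allocation surface declared above (not part of the vendored file); `try_reserve` reports failure through `TryReserveError` instead of aborting.

use hashbrown::{HashMap, TryReserveError};

fn main() {
    let mut map: HashMap<u64, u64> = HashMap::new();
    match map.try_reserve(1024) {
        Ok(()) => {
            map.insert(1, 2);
        }
        Err(TryReserveError::CapacityOverflow) => {
            eprintln!("requested capacity exceeds the collection's maximum");
        }
        Err(TryReserveError::AllocError { layout }) => {
            eprintln!("allocator failed for layout {:?}", layout);
        }
    }
}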

zeroidc/vendor/hashbrown/src/macros.rs (vendored, new file, 69 lines)

@@ -0,0 +1,69 @@
// See the cfg-if crate.
macro_rules! cfg_if {
// match if/else chains with a final `else`
($(
if #[cfg($($meta:meta),*)] { $($it:item)* }
) else * else {
$($it2:item)*
}) => {
cfg_if! {
@__items
() ;
$( ( ($($meta),*) ($($it)*) ), )*
( () ($($it2)*) ),
}
};
// match if/else chains lacking a final `else`
(
if #[cfg($($i_met:meta),*)] { $($i_it:item)* }
$(
else if #[cfg($($e_met:meta),*)] { $($e_it:item)* }
)*
) => {
cfg_if! {
@__items
() ;
( ($($i_met),*) ($($i_it)*) ),
$( ( ($($e_met),*) ($($e_it)*) ), )*
( () () ),
}
};
// Internal and recursive macro to emit all the items
//
// Collects all the negated cfgs in a list at the beginning and after the
// semicolon is all the remaining items
(@__items ($($not:meta,)*) ; ) => {};
(@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => {
// Emit all items within one block, applying an appropriate #[cfg]. The
// #[cfg] will require all `$m` matchers specified and must also negate
// all previous matchers.
cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* }
// Recurse to emit all other items in `$rest`, and when we do so add all
// our `$m` matchers to the list of `$not` matchers as future emissions
// will have to negate everything we just matched as well.
cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* }
};
// Internal macro to apply a cfg attribute to a list of items
(@__apply $m:meta, $($it:item)*) => {
$(#[$m] $it)*
};
}
// Helper macro for specialization. This also helps avoid parse errors if the
// default fn syntax for specialization changes in the future.
#[cfg(feature = "nightly")]
macro_rules! default_fn {
($($tt:tt)*) => {
default $($tt)*
}
}
#[cfg(not(feature = "nightly"))]
macro_rules! default_fn {
($($tt:tt)*) => {
$($tt)*
}
}
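The macro above is a private copy of the published `cfg-if` crate, as its leading comment notes; an editorial sketch of the same pattern using that crate (not part of the vendored file):

use cfg_if::cfg_if;

cfg_if! {
    if #[cfg(target_pointer_width = "64")] {
        // Emitted only on 64-bit targets.
        type Word = u64;
    } else {
        // Fallback branch: emitted with the negation of all earlier cfgs.
        type Word = u32;
    }
}

fn main() {
    println!("word size: {} bytes", core::mem::size_of::<Word>());
}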

zeroidc/vendor/hashbrown/src/map.rs (vendored, new file, 4922 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,72 @@
pub(crate) use self::inner::{do_alloc, Allocator, Global};
#[cfg(feature = "nightly")]
mod inner {
use crate::alloc::alloc::Layout;
pub use crate::alloc::alloc::{Allocator, Global};
use core::ptr::NonNull;
#[allow(clippy::map_err_ignore)]
pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
alloc
.allocate(layout)
.map(|ptr| ptr.as_non_null_ptr())
.map_err(|_| ())
}
#[cfg(feature = "bumpalo")]
unsafe impl Allocator for crate::BumpWrapper<'_> {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
match self.0.try_alloc_layout(layout) {
Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())),
Err(_) => Err(core::alloc::AllocError),
}
}
#[inline]
unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
}
}
#[cfg(not(feature = "nightly"))]
mod inner {
use crate::alloc::alloc::{alloc, dealloc, Layout};
use core::ptr::NonNull;
pub unsafe trait Allocator {
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()>;
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
}
#[derive(Copy, Clone)]
pub struct Global;
unsafe impl Allocator for Global {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
unsafe { NonNull::new(alloc(layout)).ok_or(()) }
}
#[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
dealloc(ptr.as_ptr(), layout)
}
}
impl Default for Global {
#[inline]
fn default() -> Self {
Global
}
}
pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
alloc.allocate(layout)
}
#[cfg(feature = "bumpalo")]
unsafe impl Allocator for crate::BumpWrapper<'_> {
#[allow(clippy::map_err_ignore)]
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
self.0.try_alloc_layout(layout).map_err(|_| ())
}
unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
}
}


@@ -0,0 +1,122 @@
use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE};
#[cfg(feature = "nightly")]
use core::intrinsics;
/// A bit mask which contains the result of a `Match` operation on a `Group` and
/// allows iterating through them.
///
/// The bit mask is arranged so that low-order bits represent lower memory
/// addresses for group match results.
///
/// For implementation reasons, the bits in the set may be sparsely packed, so
/// that there is only one bit-per-byte used (the high bit, 7). If this is the
/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
/// similarly a mask of all the actually-used bits.
#[derive(Copy, Clone)]
pub struct BitMask(pub BitMaskWord);
#[allow(clippy::use_self)]
impl BitMask {
/// Returns a new `BitMask` with all bits inverted.
#[inline]
#[must_use]
pub fn invert(self) -> Self {
BitMask(self.0 ^ BITMASK_MASK)
}
/// Flip the bit in the mask for the entry at the given index.
///
/// Returns the bit's previous state.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
#[cfg(feature = "raw")]
pub unsafe fn flip(&mut self, index: usize) -> bool {
// NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
self.0 ^= mask;
// The bit was set if the bit is now 0.
self.0 & mask == 0
}
/// Returns a new `BitMask` with the lowest bit removed.
#[inline]
#[must_use]
pub fn remove_lowest_bit(self) -> Self {
BitMask(self.0 & (self.0 - 1))
}
/// Returns whether the `BitMask` has at least one set bit.
#[inline]
pub fn any_bit_set(self) -> bool {
self.0 != 0
}
/// Returns the first set bit in the `BitMask`, if there is one.
#[inline]
pub fn lowest_set_bit(self) -> Option<usize> {
if self.0 == 0 {
None
} else {
Some(unsafe { self.lowest_set_bit_nonzero() })
}
}
/// Returns the first set bit in the `BitMask`, if there is one. The
/// bitmask must not be empty.
#[inline]
#[cfg(feature = "nightly")]
pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE
}
#[inline]
#[cfg(not(feature = "nightly"))]
pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
self.trailing_zeros()
}
/// Returns the number of trailing zeroes in the `BitMask`.
#[inline]
pub fn trailing_zeros(self) -> usize {
// ARM doesn't have a trailing_zeroes instruction, and instead uses
// reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
// versions (pre-ARMv7) don't have RBIT and need to emulate it
// instead. Since we only have 1 bit set in each byte on ARM, we can
// use swap_bytes (REV) + leading_zeroes instead.
if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE
} else {
self.0.trailing_zeros() as usize / BITMASK_STRIDE
}
}
/// Returns the number of leading zeroes in the `BitMask`.
#[inline]
pub fn leading_zeros(self) -> usize {
self.0.leading_zeros() as usize / BITMASK_STRIDE
}
}
impl IntoIterator for BitMask {
type Item = usize;
type IntoIter = BitMaskIter;
#[inline]
fn into_iter(self) -> BitMaskIter {
BitMaskIter(self)
}
}
/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
pub struct BitMaskIter(BitMask);
impl Iterator for BitMaskIter {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<usize> {
let bit = self.0.lowest_set_bit()?;
self.0 = self.0.remove_lowest_bit();
Some(bit)
}
}
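The iterator above is the standard clear-lowest-set-bit loop. A minimal standalone sketch of the same pattern on a plain `u16` mask with stride 1 (mirroring the SSE2 layout; hashbrown's private `BitMask`/`BitMaskIter` types are not used here):

fn set_bit_indices(mut mask: u16) -> Vec<usize> {
    let mut out = Vec::new();
    while mask != 0 {
        out.push(mask.trailing_zeros() as usize); // lowest_set_bit
        mask &= mask - 1;                         // remove_lowest_bit
    }
    out
}

fn main() {
    // Bits 0, 3 and 5 are set, so those are the indices reported.
    assert_eq!(set_bit_indices(0b0010_1001), vec![0, 3, 5]);
}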

View File

@@ -0,0 +1,151 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::{mem, ptr};
// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
#[cfg(any(
target_pointer_width = "64",
target_arch = "aarch64",
target_arch = "x86_64",
))]
type GroupWord = u64;
#[cfg(all(
target_pointer_width = "32",
not(target_arch = "aarch64"),
not(target_arch = "x86_64"),
))]
type GroupWord = u32;
pub type BitMaskWord = GroupWord;
pub const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
/// Helper function to replicate a byte across a `GroupWord`.
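/// For example, `repeat(0x80)` yields `0x8080_8080_8080_8080` when `GroupWord` is `u64`.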
#[inline]
fn repeat(byte: u8) -> GroupWord {
GroupWord::from_ne_bytes([byte; Group::WIDTH])
}
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
pub struct Group(GroupWord);
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
bytes: [u8; Group::WIDTH],
}
const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
_align: [],
bytes: [EMPTY; Group::WIDTH],
};
&ALIGNED_BYTES.bytes
}
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub unsafe fn load(ptr: *const u8) -> Self {
Group(ptr::read_unaligned(ptr.cast()))
}
/// Loads a group of bytes starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(ptr::read(ptr.cast()))
}
/// Stores the group of bytes to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
ptr::write(ptr.cast(), self.0);
}
/// Returns a `BitMask` indicating all bytes in the group which *may*
/// have the given value.
///
/// This function may return a false positive in certain cases where
/// the byte in the group differs from the searched value only in its
/// lowest bit. This is fine because:
/// - This never happens for `EMPTY` and `DELETED`, only full entries.
/// - The check for key equality will catch these.
/// - This only happens if there is at least 1 true match.
/// - The chance of this happening is very low (< 1% chance per byte).
#[inline]
pub fn match_byte(self, byte: u8) -> BitMask {
// This algorithm is derived from
// http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
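// Worked per-byte example when searching for byte 0x42 (illustrative values):
//   a group byte equal to 0x42: cmp = 0x00, 0x00 - 0x01 wraps to 0xFF, and
//   0xFF & !0x00 & 0x80 = 0x80, so the byte's high bit ends up set.
//   a group byte equal to 0x40: cmp = 0x02, 0x02 - 0x01 = 0x01, and
//   0x01 & !0x02 & 0x80 = 0x00, so no match is reported.
// (The subtraction runs across the whole word, so a borrow out of a matching
// byte is what causes the rare false positives described above.)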
let cmp = self.0 ^ repeat(byte);
BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
pub fn match_empty(self) -> BitMask {
// If the high bit is set, then the byte must be either:
// 1111_1111 (EMPTY) or 1000_0000 (DELETED).
// So we can just check if the top two bits are 1 by ANDing them.
BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub fn match_empty_or_deleted(self) -> BitMask {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask((self.0 & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
pub fn match_full(self) -> BitMask {
self.match_empty_or_deleted().invert()
}
/// Performs the following transformation on all bytes in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let full = 1000_0000 (true) or 0000_0000 (false)
// !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry)
// !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry)
let full = !self.0 & repeat(0x80);
Group(!full + (full >> 7))
}
}
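The generic `match_byte` above is the classic SWAR zero-byte trick applied to `x ^ repeat(byte)`. A small standalone sketch on a plain `u64` (a hypothetical helper, not hashbrown's private `Group`/`BitMask`; the real code also applies `.to_le()` because it loads the group with a native-endian read):

/// High bit set in every byte of `x` that equals `needle` (false positives
/// are possible only next to a true match, as the doc comment above explains).
fn match_byte_swar(x: u64, needle: u8) -> u64 {
    let repeat = |b: u8| u64::from_ne_bytes([b; 8]);
    let cmp = x ^ repeat(needle); // bytes equal to `needle` become 0x00
    cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)
}

fn main() {
    // Bytes of the group, index 0 in the least-significant byte.
    let group = u64::from_le_bytes([0x11, 0x42, 0x33, 0x42, 0x55, 0x66, 0x77, 0x88]);
    let mask = match_byte_swar(group, 0x42);

    // Dividing a bit index by the stride (8) recovers the byte index,
    // exactly what BitMask::lowest_set_bit / trailing_zeros do.
    assert_eq!(mask.trailing_zeros() / 8, 1);
    assert_eq!((mask & (mask - 1)).trailing_zeros() / 8, 3);
}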

2262
zeroidc/vendor/hashbrown/src/raw/mod.rs vendored Normal file

File diff suppressed because it is too large

145
zeroidc/vendor/hashbrown/src/raw/sse2.rs vendored Normal file
View File

@@ -0,0 +1,145 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::mem;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as x86;
pub type BitMaskWord = u16;
pub const BITMASK_STRIDE: usize = 1;
pub const BITMASK_MASK: BitMaskWord = 0xffff;
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit SSE value.
#[derive(Copy, Clone)]
pub struct Group(x86::__m128i);
// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[allow(clippy::items_after_statements)]
pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
bytes: [u8; Group::WIDTH],
}
const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
_align: [],
bytes: [EMPTY; Group::WIDTH],
};
&ALIGNED_BYTES.bytes
}
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub unsafe fn load(ptr: *const u8) -> Self {
Group(x86::_mm_loadu_si128(ptr.cast()))
}
/// Loads a group of bytes starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(x86::_mm_load_si128(ptr.cast()))
}
/// Stores the group of bytes to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
x86::_mm_store_si128(ptr.cast(), self.0);
}
/// Returns a `BitMask` indicating all bytes in the group which have
/// the given value.
#[inline]
pub fn match_byte(self, byte: u8) -> BitMask {
#[allow(
clippy::cast_possible_wrap, // byte: u8 as i8
// byte: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
// upper 16-bits of the i32 are zeroed:
clippy::cast_sign_loss,
clippy::cast_possible_truncation
)]
unsafe {
let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8));
BitMask(x86::_mm_movemask_epi8(cmp) as u16)
}
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
pub fn match_empty(self) -> BitMask {
self.match_byte(EMPTY)
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub fn match_empty_or_deleted(self) -> BitMask {
#[allow(
// byte: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
// upper 16-bits of the i32 are zeroed:
clippy::cast_sign_loss,
clippy::cast_possible_truncation
)]
unsafe {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask(x86::_mm_movemask_epi8(self.0) as u16)
}
}
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
pub fn match_full(&self) -> BitMask {
self.match_empty_or_deleted().invert()
}
/// Performs the following transformation on all bytes in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false)
// 1111_1111 | 1000_0000 = 1111_1111
// 0000_0000 | 1000_0000 = 1000_0000
#[allow(
clippy::cast_possible_wrap, // byte: 0x80_u8 as i8
)]
unsafe {
let zero = x86::_mm_setzero_si128();
let special = x86::_mm_cmpgt_epi8(zero, self.0);
Group(x86::_mm_or_si128(
special,
x86::_mm_set1_epi8(0x80_u8 as i8),
))
}
}
}
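On the SSE2 path, `_mm_movemask_epi8` packs the high bit of each of the 16 lanes into a 16-bit mask, so bit i of the `BitMask` maps directly to byte i of the group (stride 1). A minimal standalone sketch using the stable `core::arch` intrinsics (not hashbrown's private types):

#[cfg(target_arch = "x86_64")]
fn main() {
    use core::arch::x86_64::{_mm_cmpeq_epi8, _mm_movemask_epi8, _mm_set1_epi8, _mm_setr_epi8};

    // SSE2 is part of the x86_64 baseline, so these intrinsics can run on any
    // x86_64 target; `unsafe` is still required by the intrinsic API.
    let mask = unsafe {
        // A 16-byte "group" with the value 0x42 at lanes 3 and 5.
        let group = _mm_setr_epi8(0, 1, 2, 0x42, 4, 0x42, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
        let cmp = _mm_cmpeq_epi8(group, _mm_set1_epi8(0x42)); // 0xFF where equal
        _mm_movemask_epi8(cmp) as u16                         // high bit of each lane
    };
    assert_eq!(mask, (1 << 3) | (1 << 5));
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}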

View File

@@ -0,0 +1,630 @@
use self::RustcEntry::*;
use crate::map::{make_insert_hash, Drain, HashMap, IntoIter, Iter, IterMut};
use crate::raw::{Allocator, Bucket, Global, RawTable};
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
use core::mem;
impl<K, V, S, A> HashMap<K, V, S, A>
where
K: Eq + Hash,
S: BuildHasher,
A: Allocator + Clone,
{
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut letters = HashMap::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.rustc_entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> {
let hash = make_insert_hash(&self.hash_builder, &key);
if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) {
RustcEntry::Occupied(RustcOccupiedEntry {
key: Some(key),
elem,
table: &mut self.table,
})
} else {
// Ideally we would put this in VacantEntry::insert, but Entry is not
// generic over the BuildHasher and adding a generic parameter would be
// a breaking change.
self.reserve(1);
RustcEntry::Vacant(RustcVacantEntry {
hash,
key,
table: &mut self.table,
})
}
}
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`rustc_entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry
pub enum RustcEntry<'a, K, V, A = Global>
where
A: Allocator + Clone,
{
/// An occupied entry.
Occupied(RustcOccupiedEntry<'a, K, V, A>),
/// A vacant entry.
Vacant(RustcVacantEntry<'a, K, V, A>),
}
impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
}
}
}
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`RustcEntry`] enum.
///
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcOccupiedEntry<'a, K, V, A = Global>
where
A: Allocator + Clone,
{
key: Option<K>,
elem: Bucket<(K, V)>,
table: &'a mut RawTable<(K, V), A>,
}
unsafe impl<K, V, A> Send for RustcOccupiedEntry<'_, K, V, A>
where
K: Send,
V: Send,
A: Allocator + Clone + Send,
{
}
unsafe impl<K, V, A> Sync for RustcOccupiedEntry<'_, K, V, A>
where
K: Sync,
V: Sync,
A: Allocator + Clone + Sync,
{
}
impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcOccupiedEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
.field("value", self.get())
.finish()
}
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`RustcEntry`] enum.
///
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcVacantEntry<'a, K, V, A = Global>
where
A: Allocator + Clone,
{
hash: u64,
key: K,
table: &'a mut RawTable<(K, V), A>,
}
impl<K: Debug, V, A: Allocator + Clone> Debug for RustcVacantEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
}
impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> {
/// Sets the value of the entry, and returns a RustcOccupiedEntry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// let entry = map.rustc_entry("horseyland").insert(37);
///
/// assert_eq!(entry.key(), &"horseyland");
/// ```
pub fn insert(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> {
match self {
Vacant(entry) => entry.insert_entry(value),
Occupied(mut entry) => {
entry.insert(value);
entry
}
}
}
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// map.rustc_entry("poneyland").or_insert(3);
/// assert_eq!(map["poneyland"], 3);
///
/// *map.rustc_entry("poneyland").or_insert(10) *= 2;
/// assert_eq!(map["poneyland"], 6);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_insert(self, default: V) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, String> = HashMap::new();
/// let s = "hoho".to_string();
///
/// map.rustc_entry("poneyland").or_insert_with(|| s);
///
/// assert_eq!(map["poneyland"], "hoho".to_string());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default()),
}
}
/// Returns a reference to this entry's key.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
match *self {
Occupied(ref entry) => entry.key(),
Vacant(ref entry) => entry.key(),
}
}
/// Provides in-place mutable access to an occupied entry before any
/// potential inserts into the map.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// map.rustc_entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 42);
///
/// map.rustc_entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 43);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn and_modify<F>(self, f: F) -> Self
where
F: FnOnce(&mut V),
{
match self {
Occupied(mut entry) => {
f(entry.get_mut());
Occupied(entry)
}
Vacant(entry) => Vacant(entry),
}
}
}
impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// # fn main() {
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
/// map.rustc_entry("poneyland").or_default();
///
/// assert_eq!(map["poneyland"], None);
/// # }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_default(self) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(Default::default()),
}
}
}
impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> {
/// Gets a reference to the key in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
unsafe { &self.elem.as_ref().0 }
}
/// Take the ownership of the key and value from the map.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// // We delete the entry from the map.
/// o.remove_entry();
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
unsafe { self.table.remove(self.elem) }
}
/// Gets a reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.get(), &12);
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn get(&self) -> &V {
unsafe { &self.elem.as_ref().1 }
}
/// Gets a mutable reference to the value in the entry.
///
/// If you need a reference to the `RustcOccupiedEntry` which may outlive the
/// destruction of the `RustcEntry` value, see [`into_mut`].
///
/// [`into_mut`]: #method.into_mut
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") {
/// *o.get_mut() += 10;
/// assert_eq!(*o.get(), 22);
///
/// // We can use the same RustcEntry multiple times.
/// *o.get_mut() += 2;
/// }
///
/// assert_eq!(map["poneyland"], 24);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_mut(&mut self) -> &mut V {
unsafe { &mut self.elem.as_mut().1 }
}
/// Converts the RustcOccupiedEntry into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself.
///
/// If you need multiple references to the `RustcOccupiedEntry`, see [`get_mut`].
///
/// [`get_mut`]: #method.get_mut
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// *o.into_mut() += 10;
/// }
///
/// assert_eq!(map["poneyland"], 22);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_mut(self) -> &'a mut V {
unsafe { &mut self.elem.as_mut().1 }
}
/// Sets the value of the entry, and returns the entry's old value.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.insert(15), 12);
/// }
///
/// assert_eq!(map["poneyland"], 15);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, mut value: V) -> V {
let old_value = self.get_mut();
mem::swap(&mut value, old_value);
value
}
/// Takes the value out of the entry, and returns it.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove(self) -> V {
self.remove_entry().1
}
/// Replaces the entry, returning the old key and value. The new key in the hash map will be
/// the key used to create this entry.
///
/// # Examples
///
/// ```
/// use hashbrown::hash_map::{RustcEntry, HashMap};
/// use std::rc::Rc;
///
/// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
/// map.insert(Rc::new("Stringthing".to_string()), 15);
///
/// let my_key = Rc::new("Stringthing".to_string());
///
/// if let RustcEntry::Occupied(entry) = map.rustc_entry(my_key) {
/// // Also replace the key with a handle to our other key.
/// let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
/// }
///
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn replace_entry(self, value: V) -> (K, V) {
let entry = unsafe { self.elem.as_mut() };
let old_key = mem::replace(&mut entry.0, self.key.unwrap());
let old_value = mem::replace(&mut entry.1, value);
(old_key, old_value)
}
/// Replaces the key in the hash map with the key used to create this entry.
///
/// # Examples
///
/// ```
/// use hashbrown::hash_map::{RustcEntry, HashMap};
/// use std::rc::Rc;
///
/// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
/// let mut known_strings: Vec<Rc<String>> = Vec::new();
///
/// // Initialise known strings, run program, etc.
///
/// reclaim_memory(&mut map, &known_strings);
///
/// fn reclaim_memory(map: &mut HashMap<Rc<String>, u32>, known_strings: &[Rc<String>]) {
/// for s in known_strings {
/// if let RustcEntry::Occupied(entry) = map.rustc_entry(s.clone()) {
/// // Replaces the entry's key with our version of it in `known_strings`.
/// entry.replace_key();
/// }
/// }
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn replace_key(self) -> K {
let entry = unsafe { self.elem.as_mut() };
mem::replace(&mut entry.0, self.key.unwrap())
}
}
impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> {
/// Gets a reference to the key that would be used when inserting a value
/// through the `RustcVacantEntry`.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
&self.key
}
/// Take ownership of the key.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
/// v.into_key();
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_key(self) -> K {
self.key
}
/// Sets the value of the entry with the RustcVacantEntry's key,
/// and returns a mutable reference to it.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(o) = map.rustc_entry("poneyland") {
/// o.insert(37);
/// }
/// assert_eq!(map["poneyland"], 37);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(self, value: V) -> &'a mut V {
let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
unsafe { &mut bucket.as_mut().1 }
}
/// Sets the value of the entry with the RustcVacantEntry's key,
/// and returns a RustcOccupiedEntry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
/// let o = v.insert_entry(37);
/// assert_eq!(o.get(), &37);
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> {
let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
RustcOccupiedEntry {
key: None,
elem: bucket,
table: self.table,
}
}
}
impl<K, V> IterMut<'_, K, V> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}
impl<K, V> IntoIter<K, V> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}
impl<K, V> Drain<'_, K, V> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}

View File

@@ -0,0 +1,49 @@
// Extracted from the scopeguard crate
use core::ops::{Deref, DerefMut};
pub struct ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
dropfn: F,
value: T,
}
#[inline]
pub fn guard<T, F>(value: T, dropfn: F) -> ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
ScopeGuard { dropfn, value }
}
impl<T, F> Deref for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
type Target = T;
#[inline]
fn deref(&self) -> &T {
&self.value
}
}
impl<T, F> DerefMut for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
#[inline]
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T, F> Drop for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
#[inline]
fn drop(&mut self) {
(self.dropfn)(&mut self.value)
}
}
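`ScopeGuard` is a trimmed-down copy of the `scopeguard` crate: the closure runs when the guard is dropped, whether the scope exits normally or unwinds, while `Deref`/`DerefMut` give transparent access to the wrapped value. A hypothetical in-crate usage sketch (this module is internal, so the `use` path below is an assumption):

// use crate::scopeguard::guard; // assumed in-crate import
fn record_steps(log: &mut Vec<&'static str>) {
    // The closure runs when `steps` is dropped, on every exit path out of
    // this function, including early returns and unwinding panics.
    let mut steps = guard(log, |log| log.push("cleanup ran"));

    // Deref/DerefMut forward method calls to the wrapped `&mut Vec<_>`.
    steps.push("step 1");
    steps.push("step 2");
} // `steps` dropped here -> "cleanup ran" is appended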

2299
zeroidc/vendor/hashbrown/src/set.rs vendored Normal file

File diff suppressed because it is too large