RPM build fix (reverts CI changes, which will need to be un-reverted or made conditional) and vendor Rust dependencies to make builds much faster in any CI system.
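For reference, vendoring like this is typically wired up with `cargo vendor`, which copies every dependency into the tree and prints the config snippet to commit alongside it. A minimal sketch of that config; the exact paths are assumptions, not taken from this commit:

```toml
# Hypothetical .cargo/config.toml next to the zeroidc crate; `cargo vendor`
# prints the exact snippet to use. With this in place, builds resolve
# dependencies from the local vendor/ directory instead of hitting crates.io.
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "vendor"
```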
zeroidc/vendor/spin/src/lib.rs (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
#![crate_type = "lib"]
#![warn(missing_docs)]

//! Synchronization primitives based on spinning

#![no_std]

#[cfg(test)]
#[macro_use]
extern crate std;

pub use mutex::*;
pub use rw_lock::*;
pub use once::*;

mod mutex;
mod rw_lock;
mod once;
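As context for the files that follow: the crate re-exports three spinning primitives. A minimal usage sketch, illustrative only and not part of the vendored sources, assuming `spin` is available as a dependency:

```rust
// Illustrative use of the spin 0.x API vendored here.
fn main() {
    // Mutex: exclusive access by busy-waiting instead of blocking.
    let m = spin::Mutex::new(0u32);
    *m.lock() += 1;
    assert_eq!(*m.lock(), 1);

    // RwLock: many readers or at most one writer.
    let rw = spin::RwLock::new(5u32);
    assert_eq!(*rw.read(), 5);
    *rw.write() = 6;

    // Once: run a closure exactly once and cache its result.
    static INIT: spin::Once<u32> = spin::Once::new();
    assert_eq!(*INIT.call_once(|| 42), 42);
}
```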
zeroidc/vendor/spin/src/mutex.rs (vendored, new file, 388 lines)
@@ -0,0 +1,388 @@
use core::sync::atomic::{AtomicBool, Ordering, spin_loop_hint as cpu_relax};
use core::cell::UnsafeCell;
use core::marker::Sync;
use core::ops::{Drop, Deref, DerefMut};
use core::fmt;
use core::option::Option::{self, None, Some};
use core::default::Default;

/// This type provides MUTual EXclusion based on spinning.
///
/// # Description
///
/// The behaviour of these locks is similar to their namesakes in `std::sync`. They
/// differ on the following:
///
/// - The lock will not be poisoned in case of failure.
///
/// # Simple examples
///
/// ```
/// use spin;
/// let spin_mutex = spin::Mutex::new(0);
///
/// // Modify the data
/// {
///     let mut data = spin_mutex.lock();
///     *data = 2;
/// }
///
/// // Read the data
/// let answer =
/// {
///     let data = spin_mutex.lock();
///     *data
/// };
///
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread-safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let numthreads = 1000;
/// let spin_mutex = Arc::new(spin::Mutex::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(numthreads + 1));
///
/// for _ in (0..numthreads)
/// {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_mutex.clone();
///     std::thread::spawn(move||
///     {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, numthreads);
/// ```
pub struct Mutex<T: ?Sized>
{
    lock: AtomicBool,
    data: UnsafeCell<T>,
}

/// A guard through which the protected data can be accessed
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct MutexGuard<'a, T: ?Sized + 'a>
{
    lock: &'a AtomicBool,
    data: &'a mut T,
}

// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}

impl<T> Mutex<T>
{
    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use spin;
    ///
    /// static MUTEX: spin::Mutex<()> = spin::Mutex::new(());
    ///
    /// fn demo() {
    ///     let lock = MUTEX.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    pub const fn new(user_data: T) -> Mutex<T>
    {
        Mutex
        {
            lock: AtomicBool::new(false),
            data: UnsafeCell::new(user_data),
        }
    }

    /// Consumes this mutex, returning the underlying data.
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let Mutex { data, .. } = self;
        data.into_inner()
    }
}

impl<T: ?Sized> Mutex<T>
{
    fn obtain_lock(&self)
    {
        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false
        {
            // Wait until the lock looks unlocked before retrying
            while self.lock.load(Ordering::Relaxed)
            {
                cpu_relax();
            }
        }
    }

    /// Locks the spinlock and returns a guard.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// ```
    /// let mylock = spin::Mutex::new(0);
    /// {
    ///     let mut data = mylock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped
    /// }
    ///
    /// ```
    pub fn lock(&self) -> MutexGuard<T>
    {
        self.obtain_lock();
        MutexGuard
        {
            lock: &self.lock,
            data: unsafe { &mut *self.data.get() },
        }
    }

    /// Force unlock the spinlock.
    ///
    /// This is *extremely* unsafe if the lock is not held by the current
    /// thread. However, this can be useful in some instances for exposing the
    /// lock to FFI that doesn't know how to deal with RAII.
    ///
    /// If the lock isn't held, this is a no-op.
    pub unsafe fn force_unlock(&self) {
        self.lock.store(false, Ordering::Release);
    }

    /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
    /// a guard within Some.
    pub fn try_lock(&self) -> Option<MutexGuard<T>>
    {
        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
        {
            Some(
                MutexGuard {
                    lock: &self.lock,
                    data: unsafe { &mut *self.data.get() },
                }
            )
        }
        else
        {
            None
        }
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T>
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
    {
        match self.try_lock()
        {
            Some(guard) => write!(f, "Mutex {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, "}}")),
            None => write!(f, "Mutex {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default> Default for Mutex<T> {
    fn default() -> Mutex<T> {
        Mutex::new(Default::default())
    }
}

impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>
{
    type Target = T;
    fn deref<'b>(&'b self) -> &'b T { &*self.data }
}

impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>
{
    fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data }
}

impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
{
    /// The dropping of the MutexGuard will release the lock it was created from.
    fn drop(&mut self)
    {
        self.lock.store(false, Ordering::Release);
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    use super::*;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        static M: Mutex<()> = Mutex::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
            let tx2 = tx.clone();
            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe {CNT}, J * K * 2);
    }

    #[test]
    fn try_lock() {
        let mutex = Mutex::new(42);

        // First lock succeeds
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        // Additional lock fails
        let b = mutex.try_lock();
        assert!(b.is_none());

        // After dropping lock, it succeeds again
        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    #[test]
    fn test_into_inner() {
        let m = Mutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move|| {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move|| -> () {
            struct Unwinder {
                i: Arc<Mutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        }).join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = Mutex::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
}
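One note on `obtain_lock` above: it is a classic test-and-test-and-set loop, and both `compare_and_swap` and `spin_loop_hint` are deprecated in later Rust releases. A sketch of the modern equivalent, illustrative only and not part of the vendored file:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::hint::spin_loop;

// Test-and-test-and-set acquire loop, equivalent to obtain_lock() above.
fn obtain_lock(lock: &AtomicBool) {
    // compare_exchange_weak replaces the deprecated compare_and_swap.
    while lock
        .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
        .is_err()
    {
        // Spin read-only until the lock looks free; this keeps the cache
        // line in a shared state instead of bouncing it between cores.
        while lock.load(Ordering::Relaxed) {
            spin_loop();
        }
    }
}

fn main() {
    let lock = AtomicBool::new(false);
    obtain_lock(&lock);
    assert!(lock.load(Ordering::Relaxed));
}
```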
zeroidc/vendor/spin/src/once.rs (vendored, new file, 290 lines)
@@ -0,0 +1,290 @@
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint as cpu_relax};
use core::fmt;

/// A synchronization primitive which can be used to run a one-time global
/// initialization. Unlike its std equivalent, this is generalized so that the
/// closure returns a value and it is stored. Once therefore acts something like
/// a future, too.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static START: spin::Once<()> = spin::Once::new();
///
/// START.call_once(|| {
///     // run initialization here
/// });
/// ```
pub struct Once<T> {
    state: AtomicUsize,
    data: UnsafeCell<Option<T>>, // TODO remove option and use mem::uninitialized
}

impl<T: fmt::Debug> fmt::Debug for Once<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try() {
            Some(s) => write!(f, "Once {{ data: ")
                .and_then(|()| s.fmt(f))
                .and_then(|()| write!(f, "}}")),
            None => write!(f, "Once {{ <uninitialized> }}")
        }
    }
}

// Same unsafe impls as `std::sync::RwLock`, because this also allows for
// concurrent reads.
unsafe impl<T: Send + Sync> Sync for Once<T> {}
unsafe impl<T: Send> Send for Once<T> {}

// Four states that a Once can be in, encoded into the lower bits of `state` in
// the Once structure.
const INCOMPLETE: usize = 0x0;
const RUNNING: usize = 0x1;
const COMPLETE: usize = 0x2;
const PANICKED: usize = 0x3;

use core::hint::unreachable_unchecked as unreachable;

impl<T> Once<T> {
    /// Initialization constant of `Once`.
    pub const INIT: Self = Once {
        state: AtomicUsize::new(INCOMPLETE),
        data: UnsafeCell::new(None),
    };

    /// Creates a new `Once` value.
    pub const fn new() -> Once<T> {
        Self::INIT
    }

    fn force_get<'a>(&'a self) -> &'a T {
        match unsafe { &*self.data.get() }.as_ref() {
            None => unsafe { unreachable() },
            Some(p) => p,
        }
    }

    /// Performs an initialization routine once and only once. The given closure
    /// will be executed if this is the first time `call_once` has been called,
    /// and otherwise the routine will *not* be invoked.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns, it is guaranteed that some initialization
    /// has run and completed (it may not be the closure specified). The
    /// returned reference will point to the result from the closure that was
    /// run.
    ///
    /// # Examples
    ///
    /// ```
    /// use spin;
    ///
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// fn get_cached_val() -> usize {
    ///     *INIT.call_once(expensive_computation)
    /// }
    ///
    /// fn expensive_computation() -> usize {
    ///     // ...
    /// # 2
    /// }
    /// ```
    pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T
        where F: FnOnce() -> T
    {
        let mut status = self.state.load(Ordering::SeqCst);

        if status == INCOMPLETE {
            status = self.state.compare_and_swap(INCOMPLETE,
                                                 RUNNING,
                                                 Ordering::SeqCst);
            if status == INCOMPLETE { // We init
                // We use a guard (Finish) to catch panics caused by builder
                let mut finish = Finish { state: &self.state, panicked: true };
                unsafe { *self.data.get() = Some(builder()) };
                finish.panicked = false;

                status = COMPLETE;
                self.state.store(status, Ordering::SeqCst);

                // This next line is strictly an optimization
                return self.force_get();
            }
        }

        loop {
            match status {
                INCOMPLETE => unreachable!(),
                RUNNING => { // We spin
                    cpu_relax();
                    status = self.state.load(Ordering::SeqCst)
                },
                PANICKED => panic!("Once has panicked"),
                COMPLETE => return self.force_get(),
                _ => unsafe { unreachable() },
            }
        }
    }

    /// Returns a reference iff the `Once` was previously initialized
    pub fn try<'a>(&'a self) -> Option<&'a T> {
        match self.state.load(Ordering::SeqCst) {
            COMPLETE => Some(self.force_get()),
            _ => None,
        }
    }

    /// Like try, but will spin if the `Once` is in the process of being
    /// initialized
    pub fn wait<'a>(&'a self) -> Option<&'a T> {
        loop {
            match self.state.load(Ordering::SeqCst) {
                INCOMPLETE => return None,
                RUNNING => cpu_relax(), // We spin
                COMPLETE => return Some(self.force_get()),
                PANICKED => panic!("Once has panicked"),
                _ => unsafe { unreachable() },
            }
        }
    }
}

struct Finish<'a> {
    state: &'a AtomicUsize,
    panicked: bool,
}

impl<'a> Drop for Finish<'a> {
    fn drop(&mut self) {
        if self.panicked {
            self.state.store(PANICKED, Ordering::SeqCst);
        }
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::mpsc::channel;
    use std::thread;
    use super::Once;

    #[test]
    fn smoke_once() {
        static O: Once<()> = Once::new();
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }

    #[test]
    fn smoke_once_value() {
        static O: Once<usize> = Once::new();
        let a = O.call_once(|| 1);
        assert_eq!(*a, 1);
        let b = O.call_once(|| 2);
        assert_eq!(*b, 1);
    }

    #[test]
    fn stampede_once() {
        static O: Once<()> = Once::new();
        static mut RUN: bool = false;

        let (tx, rx) = channel();
        for _ in 0..10 {
            let tx = tx.clone();
            thread::spawn(move|| {
                for _ in 0..4 { thread::yield_now() }
                unsafe {
                    O.call_once(|| {
                        assert!(!RUN);
                        RUN = true;
                    });
                    assert!(RUN);
                }
                tx.send(()).unwrap();
            });
        }

        unsafe {
            O.call_once(|| {
                assert!(!RUN);
                RUN = true;
            });
            assert!(RUN);
        }

        for _ in 0..10 {
            rx.recv().unwrap();
        }
    }

    #[test]
    fn try() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.try().is_none());
        INIT.call_once(|| 2);
        assert_eq!(INIT.try().map(|r| *r), Some(2));
    }

    #[test]
    fn try_no_wait() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.try().is_none());
        thread::spawn(move|| {
            INIT.call_once(|| loop { });
        });
        assert!(INIT.try().is_none());
    }


    #[test]
    fn wait() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.wait().is_none());
        INIT.call_once(|| 3);
        assert_eq!(INIT.wait().map(|r| *r), Some(3));
    }

    #[test]
    fn panic() {
        use ::std::panic;

        static INIT: Once<()> = Once::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| panic!());
        });
        assert!(t.is_err());

        // poisoning propagates
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| {});
        });
        assert!(t.is_err());
    }

    #[test]
    fn init_constant() {
        static O: Once<()> = Once::INIT;
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }
}
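A small usage sketch of the `Once` API above: the first caller runs the closure and stores the result, and every later caller gets the cached reference. Illustrative only, assuming the vendored `spin` crate as a dependency:

```rust
use spin::Once;

// Hypothetical lazily-initialized global built on the vendored Once.
static CONFIG: Once<Vec<u32>> = Once::new();

fn config() -> &'static Vec<u32> {
    // Only the first call runs the closure; later calls return the cached value.
    CONFIG.call_once(|| vec![1, 2, 3])
}

fn main() {
    assert_eq!(config()[0], 1);
    // wait() returns Some(&T) once initialization has completed.
    assert!(CONFIG.wait().is_some());
}
```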
zeroidc/vendor/spin/src/rw_lock.rs (vendored, new file, 777 lines)
@@ -0,0 +1,777 @@
use core::cell::UnsafeCell;
use core::default::Default;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// An [`RwLockUpgradeableGuard`](RwLockUpgradeableGuard) can be upgraded to a
/// writable guard through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade) or
/// [`RwLockUpgradeableGuard::try_upgrade`](RwLockUpgradeableGuard::try_upgrade) functions.
/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
/// functions.
///
/// Based on Facebook's
/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
/// This implementation is unfair to writers - if the lock always has readers, then no writers will
/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
/// when there are existing readers. However if the lock is that highly contended and writes are
/// crucial then this implementation may be a poor choice.
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized> {
    lock: AtomicUsize,
    data: UnsafeCell<T>,
}

const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1;

/// A guard from which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
}

/// A guard to which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
    #[doc(hidden)]
    _invariant: PhantomData<&'a mut T>,
}

/// A guard from which the protected data can be read, and can be upgraded
/// to a writable guard if needed
///
/// No writers or other upgradeable guards can exist while this is in scope. New reader
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
/// when the lock is acquired.
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockUpgradeableGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
    #[doc(hidden)]
    _invariant: PhantomData<&'a mut T>,
}

// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

impl<T> RwLock<T> {
    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use spin;
    ///
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    pub const fn new(user_data: T) -> RwLock<T> {
        RwLock {
            lock: AtomicUsize::new(0),
            data: UnsafeCell::new(user_data),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let RwLock { data, .. } = self;
        data.into_inner()
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contentious readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.read();
    ///     // The lock is now locked and the data can be read
    ///     println!("{}", *data);
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<T> {
        loop {
            match self.try_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block and will return immediately if `read`
    /// would otherwise succeed. Returns `Some` of an RAII guard which will
    /// release the shared access of this thread when dropped, or `None` if the
    /// access could not be granted. This method does not provide any
    /// guarantees with respect to the ordering of whether contentious readers
    /// or writers will acquire the lock first.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_read() {
    ///         Some(data) => {
    ///             // The lock is now locked and the data can be read
    ///             println!("{}", *data);
    ///             // The lock is dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
        let value = self.lock.fetch_add(READER, Ordering::Acquire);

        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
        // This helps reduce writer starvation.
        if value & (WRITER | UPGRADED) != 0 {
            // Lock is taken, undo.
            self.lock.fetch_sub(READER, Ordering::Release);
            None
        } else {
            Some(RwLockReadGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
            })
        }
    }

    /// Force decrement the reader count.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
    /// live, or if called more times than `read` has been called, but can be
    /// useful in FFI contexts where the caller doesn't know how to deal with
    /// RAII. The underlying atomic operation uses `Ordering::Release`.
    #[inline]
    pub unsafe fn force_read_decrement(&self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }

    /// Force unlock exclusive write access.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
    /// live, or if called when there are current readers, but can be useful in
    /// FFI contexts where the caller doesn't know how to deal with RAII. The
    /// underlying atomic operation uses `Ordering::Release`.
    #[inline]
    pub unsafe fn force_write_unlock(&self) {
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }

    #[inline(always)]
    fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T>> {
        if compare_exchange(
            &self.lock,
            0,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            Some(RwLockWriteGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
                _invariant: PhantomData,
            })
        } else {
            None
        }
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.write();
    ///     // The lock is now locked and the data can be written
    ///     *data += 1;
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<T> {
        loop {
            match self.try_write_internal(false) {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function does not ever block, and it will return `None` if a call
    /// to `write` would otherwise block. If successful, an RAII guard is
    /// returned.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_write() {
    ///         Some(mut data) => {
    ///             // The lock is now locked and the data can be written
    ///             *data += 1;
    ///             // The lock is implicitly dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
        self.try_write_internal(true)
    }

    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
    /// Upgrades can be done through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade) method.
    #[inline]
    pub fn upgradeable_read(&self) -> RwLockUpgradeableGuard<T> {
        loop {
            match self.try_upgradeable_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Tries to obtain an upgradeable lock guard.
    #[inline]
    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradeableGuard<T>> {
        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
            Some(RwLockUpgradeableGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
                _invariant: PhantomData,
            })
        } else {
            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
            // When they unlock, they will clear the bit.
            None
        }
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Some(guard) => write!(f, "RwLock {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, "}}")),
            None => write!(f, "RwLock {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default> Default for RwLock<T> {
    fn default() -> RwLock<T> {
        RwLock::new(Default::default())
    }
}

impl<'rwlock, T: ?Sized> RwLockUpgradeableGuard<'rwlock, T> {
    #[inline(always)]
    fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
        if compare_exchange(
            &self.lock,
            UPGRADED,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            // Upgrade successful
            let out = Ok(RwLockWriteGuard {
                lock: &self.lock,
                data: self.data,
                _invariant: PhantomData,
            });

            // Forget the old guard so its destructor doesn't run
            mem::forget(self);

            out
        } else {
            Err(self)
        }
    }

    /// Upgrades an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// let writable = upgradeable.upgrade();
    /// ```
    #[inline]
    pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T> {
        loop {
            self = match self.try_upgrade_internal(false) {
                Ok(guard) => return guard,
                Err(e) => e,
            };

            cpu_relax();
        }
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    ///
    /// match upgradeable.try_upgrade() {
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
    /// };
    /// ```
    #[inline]
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
        self.try_upgrade_internal(true)
    }

    #[inline]
    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(1);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    /// assert!(mylock.try_read().is_none());
    /// assert_eq!(*upgradeable, 1);
    ///
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
    /// assert!(mylock.try_read().is_some());
    /// assert_eq!(*readable, 1);
    /// ```
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);

        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }

        // Dropping self removes the UPGRADED bit
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
    /// # let readable_2 = mylock.try_read().unwrap();
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);

        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }

        // Dropping self removes the WRITER bit
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockUpgradeableGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { self.data.as_mut() }
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockUpgradeableGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert_eq!(
            self.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
            UPGRADED
        );
        self.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & WRITER, WRITER);

        // Writer is responsible for clearing both WRITER and UPGRADED bits.
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}

#[inline(always)]
fn compare_exchange(
    atomic: &AtomicUsize,
    current: usize,
    new: usize,
    success: Ordering,
    failure: Ordering,
    strong: bool,
) -> Result<usize, usize> {
    if strong {
        atomic.compare_exchange(current, new, success, failure)
    } else {
        atomic.compare_exchange_weak(current, new, success, failure)
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop((l.read(), l.read()));
        drop(l.write());
    }

    // TODO: needs RNG
    //#[test]
    //fn frob() {
    //    static R: RwLock = RwLock::new();
    //    const N: usize = 10;
    //    const M: usize = 1000;
    //
    //    let (tx, rx) = channel::<()>();
    //    for _ in 0..N {
    //        let tx = tx.clone();
    //        thread::spawn(move|| {
    //            let mut rng = rand::thread_rng();
    //            for _ in 0..M {
    //                if rng.gen_weighted_bool(N) {
    //                    drop(R.write());
    //                } else {
    //                    drop(R.read());
    //                }
    //            }
    //            drop(tx);
    //        });
    //    }
    //    drop(tx);
    //    let _ = rx.recv();
    //    unsafe { R.destroy(); }
    //}

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use std::mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read();

        let write_result = lock.try_write();
        match write_result {
            None => (),
            Some(_) => assert!(
                false,
                "try_write should not succeed while read_guard is in scope"
            ),
        }

        drop(read_guard);
    }

    #[test]
    fn test_rw_try_read() {
        let m = RwLock::new(0);
        mem::forget(m.write());
        assert!(m.try_read().is_none());
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_force_read_decrement() {
        let m = RwLock::new(());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
            m.force_read_decrement();
        }
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
        }
        assert!(m.try_write().is_some());
    }

    #[test]
    fn test_force_write_unlock() {
        let m = RwLock::new(());
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
        unsafe {
            m.force_write_unlock();
        }
        assert!(m.try_read().is_some());
    }

    #[test]
    fn test_upgrade_downgrade() {
        let m = RwLock::new(());
        {
            let _r = m.read();
            let upg = m.try_upgradeable_read().unwrap();
            assert!(m.try_read().is_none());
            assert!(m.try_write().is_none());
            assert!(upg.try_upgrade().is_err());
        }
        {
            let w = m.write();
            assert!(m.try_upgradeable_read().is_none());
            let _r = w.downgrade();
            assert!(m.try_upgradeable_read().is_some());
            assert!(m.try_read().is_some());
            assert!(m.try_write().is_none());
        }
        {
            let _u = m.upgradeable_read();
            assert!(m.try_upgradeable_read().is_none());
        }

        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
    }
}
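The whole RwLock above packs its state into a single `AtomicUsize`: bit 0 is WRITER, bit 1 is UPGRADED, and everything from bit 2 upward is the reader count, with each reader adding `READER = 1 << 2`. A small sketch of that layout, mirroring the constants in the file (illustrative only):

```rust
// Layout of the lock word used by the vendored RwLock:
//   bits 2..  reader count (each reader adds READER)
//   bit  1    UPGRADED (an upgradeable guard is held)
//   bit  0    WRITER (an exclusive writer is held)
const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1;

fn describe(state: usize) -> (usize, bool, bool) {
    let readers = state / READER; // equivalently, state >> 2
    let upgraded = state & UPGRADED != 0;
    let writer = state & WRITER != 0;
    (readers, upgraded, writer)
}

fn main() {
    // Three readers, no upgradeable guard, no writer:
    assert_eq!(describe(3 * READER), (3, false, false));
    // One upgradeable guard coexisting with two existing readers:
    assert_eq!(describe(2 * READER | UPGRADED), (2, true, false));
}
```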