Fix the RPM build (by reverting CI changes, which will need to be un-reverted or made conditional) and vendor Rust dependencies to make builds much faster in any CI system.

Author: Adam Ierymenko
Date: 2022-06-08 07:32:16 -04:00
parent 373ca30269
commit d5ca4e5f52
12611 changed files with 2898014 additions and 284 deletions
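
The vendored tree below is what makes the CI speed-up work: Cargo resolves every crate from the checked-in directory instead of the network. A minimal sketch of the usual `cargo vendor` workflow, assuming it is run from the zeroidc/ crate root (the exact invocation used for this commit is not shown):

$ cargo vendor vendor

# `cargo vendor` prints a snippet like this for .cargo/config.toml,
# redirecting crates.io resolution to the local vendor/ directory:
[source.crates-io]
replace-with = "vendored-sources"

[source.vendored-sources]
directory = "vendor"

With that config committed alongside vendor/, any CI system builds fully offline.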

zeroidc/vendor/once_cell/src/imp_pl.rs (vendored, new file, 181 lines)

@@ -0,0 +1,181 @@
use std::{
cell::UnsafeCell,
hint,
panic::{RefUnwindSafe, UnwindSafe},
sync::atomic::{AtomicU8, Ordering},
};
pub(crate) struct OnceCell<T> {
state: AtomicU8,
value: UnsafeCell<Option<T>>,
}
const INCOMPLETE: u8 = 0x0;
const RUNNING: u8 = 0x1;
const COMPLETE: u8 = 0x2;
// Why do we need `T: Send`?
// Thread A creates a `OnceCell` and shares it with
// scoped thread B, which fills the cell, which is
// then destroyed by A. That is, destructor observes
// a sent value.
unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
unsafe impl<T: Send> Send for OnceCell<T> {}
impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceCell<T> {}
impl<T: UnwindSafe> UnwindSafe for OnceCell<T> {}
impl<T> OnceCell<T> {
pub(crate) const fn new() -> OnceCell<T> {
OnceCell { state: AtomicU8::new(INCOMPLETE), value: UnsafeCell::new(None) }
}
pub(crate) const fn with_value(value: T) -> OnceCell<T> {
OnceCell { state: AtomicU8::new(COMPLETE), value: UnsafeCell::new(Some(value)) }
}
/// Safety: synchronizes with store to value via Release/Acquire.
#[inline]
pub(crate) fn is_initialized(&self) -> bool {
self.state.load(Ordering::Acquire) == COMPLETE
}
/// Safety: synchronizes with store to value via `is_initialized` or mutex
/// lock/unlock, writes value only once because of the mutex.
#[cold]
pub(crate) fn initialize<F, E>(&self, f: F) -> Result<(), E>
where
F: FnOnce() -> Result<T, E>,
{
let mut f = Some(f);
let mut res: Result<(), E> = Ok(());
let slot: *mut Option<T> = self.value.get();
initialize_inner(&self.state, &mut || {
// We are calling user-supplied function and need to be careful.
// - if it returns Err, we unlock mutex and return without touching anything
// - if it panics, we unlock mutex and propagate panic without touching anything
// - if it calls `set` or `get_or_try_init` re-entrantly, we get a deadlock on
// mutex, which is important for safety. We *could* detect this and panic,
// but that is more complicated
// - finally, if it returns Ok, we store the value and store the flag with
// `Release`, which synchronizes with `Acquire`s.
let f = unsafe { crate::take_unchecked(&mut f) };
match f() {
Ok(value) => unsafe {
// Safe b/c we have a unique access and no panic may happen
// until the cell is marked as initialized.
debug_assert!((*slot).is_none());
*slot = Some(value);
true
},
Err(err) => {
res = Err(err);
false
}
}
});
res
}
#[cold]
pub(crate) fn wait(&self) {
let key = &self.state as *const _ as usize;
unsafe {
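// Park on the address of `state`. parking_lot_core re-runs the
// condition closure under its internal bucket lock, so an unpark
// between this load and the actual sleep cannot be missed.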
parking_lot_core::park(
key,
|| self.state.load(Ordering::Acquire) != COMPLETE,
|| (),
|_, _| (),
parking_lot_core::DEFAULT_PARK_TOKEN,
None,
);
}
}
/// Get the reference to the underlying value, without checking if the cell
/// is initialized.
///
/// # Safety
///
/// Caller must ensure that the cell is in initialized state, and that
/// the contents are acquired by (synchronized to) this thread.
pub(crate) unsafe fn get_unchecked(&self) -> &T {
debug_assert!(self.is_initialized());
let slot: &Option<T> = &*self.value.get();
match slot {
Some(value) => value,
// This unsafe does improve performance, see `examples/bench`.
None => {
debug_assert!(false);
hint::unreachable_unchecked()
}
}
}
/// Gets the mutable reference to the underlying value.
/// Returns `None` if the cell is empty.
pub(crate) fn get_mut(&mut self) -> Option<&mut T> {
// Safe b/c we have an exclusive access
let slot: &mut Option<T> = unsafe { &mut *self.value.get() };
slot.as_mut()
}
/// Consumes this `OnceCell`, returning the wrapped value.
/// Returns `None` if the cell was empty.
pub(crate) fn into_inner(self) -> Option<T> {
self.value.into_inner()
}
}
struct Guard<'a> {
state: &'a AtomicU8,
new_state: u8,
}
impl<'a> Drop for Guard<'a> {
fn drop(&mut self) {
self.state.store(self.new_state, Ordering::Release);
unsafe {
let key = self.state as *const AtomicU8 as usize;
parking_lot_core::unpark_all(key, parking_lot_core::DEFAULT_UNPARK_TOKEN);
}
}
}
// Note: this is intentionally monomorphic
#[inline(never)]
fn initialize_inner(state: &AtomicU8, init: &mut dyn FnMut() -> bool) {
loop {
let exchange =
state.compare_exchange_weak(INCOMPLETE, RUNNING, Ordering::Acquire, Ordering::Acquire);
match exchange {
Ok(_) => {
let mut guard = Guard { state, new_state: INCOMPLETE };
if init() {
guard.new_state = COMPLETE;
}
return;
}
Err(COMPLETE) => return,
Err(RUNNING) => unsafe {
let key = state as *const AtomicU8 as usize;
parking_lot_core::park(
key,
|| state.load(Ordering::Relaxed) == RUNNING,
|| (),
|_, _| (),
parking_lot_core::DEFAULT_PARK_TOKEN,
None,
);
},
Err(_) => debug_assert!(false),
}
}
}
#[test]
fn test_size() {
use std::mem::size_of;
assert_eq!(size_of::<OnceCell<bool>>(), 1 * size_of::<bool>() + size_of::<u8>());
}
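
The file above is the parking_lot-backed implementation that once_cell selects when its parking_lot feature is enabled. For orientation, a minimal sketch of the public `once_cell::sync::OnceCell` API that `initialize`/`is_initialized`/`get_unchecked` ultimately serve (illustrative, not part of the vendored file):

use once_cell::sync::OnceCell;

static CONFIG: OnceCell<String> = OnceCell::new();

fn main() {
    // The first caller runs the closure; everyone else gets the cached value.
    let v = CONFIG.get_or_init(|| "loaded once".to_string());
    assert_eq!(v, "loaded once");
    // Once the cell is full, `set` is rejected and returns the value back.
    assert!(CONFIG.set("again".to_string()).is_err());
}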

zeroidc/vendor/once_cell/src/imp_std.rs (vendored, new file, 385 lines)

@@ -0,0 +1,385 @@
// There's a lot of scary concurrent code in this module, but it is copied from
// `std::sync::Once` with two changes:
// * no poisoning
// * init function can fail
use std::{
cell::{Cell, UnsafeCell},
hint::unreachable_unchecked,
marker::PhantomData,
panic::{RefUnwindSafe, UnwindSafe},
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
thread::{self, Thread},
};
use crate::take_unchecked;
#[derive(Debug)]
pub(crate) struct OnceCell<T> {
// This `queue` field is the core of the implementation. It encodes two
// pieces of information:
//
// * The current state of the cell (`INCOMPLETE`, `RUNNING`, `COMPLETE`)
// * Linked list of threads waiting for the current cell.
//
// State is encoded in two low bits. Only `INCOMPLETE` and `RUNNING` states
// allow waiters.
queue: AtomicUsize,
_marker: PhantomData<*mut Waiter>,
value: UnsafeCell<Option<T>>,
}
// Why do we need `T: Send`?
// Thread A creates a `OnceCell` and shares it with
// scoped thread B, which fills the cell, which is
// then destroyed by A. That is, destructor observes
// a sent value.
unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
unsafe impl<T: Send> Send for OnceCell<T> {}
impl<T: RefUnwindSafe + UnwindSafe> RefUnwindSafe for OnceCell<T> {}
impl<T: UnwindSafe> UnwindSafe for OnceCell<T> {}
impl<T> OnceCell<T> {
pub(crate) const fn new() -> OnceCell<T> {
OnceCell {
queue: AtomicUsize::new(INCOMPLETE),
_marker: PhantomData,
value: UnsafeCell::new(None),
}
}
pub(crate) const fn with_value(value: T) -> OnceCell<T> {
OnceCell {
queue: AtomicUsize::new(COMPLETE),
_marker: PhantomData,
value: UnsafeCell::new(Some(value)),
}
}
/// Safety: synchronizes with store to value via Release/(Acquire|SeqCst).
#[inline]
pub(crate) fn is_initialized(&self) -> bool {
// An `Acquire` load is enough because that makes all the initialization
// operations visible to us, and, this being a fast path, weaker
// ordering helps with performance. This `Acquire` synchronizes with
// `SeqCst` operations on the slow path.
self.queue.load(Ordering::Acquire) == COMPLETE
}
/// Safety: synchronizes with store to value via SeqCst read from state,
/// writes value only once because we never get to INCOMPLETE state after a
/// successful write.
#[cold]
pub(crate) fn initialize<F, E>(&self, f: F) -> Result<(), E>
where
F: FnOnce() -> Result<T, E>,
{
let mut f = Some(f);
let mut res: Result<(), E> = Ok(());
let slot: *mut Option<T> = self.value.get();
initialize_or_wait(
&self.queue,
Some(&mut || {
let f = unsafe { take_unchecked(&mut f) };
match f() {
Ok(value) => {
unsafe { *slot = Some(value) };
true
}
Err(err) => {
res = Err(err);
false
}
}
}),
);
res
}
#[cold]
pub(crate) fn wait(&self) {
initialize_or_wait(&self.queue, None);
}
/// Get the reference to the underlying value, without checking if the cell
/// is initialized.
///
/// # Safety
///
/// Caller must ensure that the cell is in initialized state, and that
/// the contents are acquired by (synchronized to) this thread.
pub(crate) unsafe fn get_unchecked(&self) -> &T {
debug_assert!(self.is_initialized());
let slot: &Option<T> = &*self.value.get();
match slot {
Some(value) => value,
// This unsafe does improve performance, see `examples/bench`.
None => {
debug_assert!(false);
unreachable_unchecked()
}
}
}
/// Gets the mutable reference to the underlying value.
/// Returns `None` if the cell is empty.
pub(crate) fn get_mut(&mut self) -> Option<&mut T> {
// Safe b/c we have a unique access.
unsafe { &mut *self.value.get() }.as_mut()
}
/// Consumes this `OnceCell`, returning the wrapped value.
/// Returns `None` if the cell was empty.
#[inline]
pub(crate) fn into_inner(self) -> Option<T> {
// Because `into_inner` takes `self` by value, the compiler statically
// verifies that it is not currently borrowed.
// So, it is safe to move out `Option<T>`.
self.value.into_inner()
}
}
// Three states that a OnceCell can be in, encoded into the lower bits of `queue` in
// the OnceCell structure.
const INCOMPLETE: usize = 0x0;
const RUNNING: usize = 0x1;
const COMPLETE: usize = 0x2;
// Mask to learn about the state. All other bits are the queue of waiters if
// this is in the RUNNING state.
const STATE_MASK: usize = 0x3;
/// Representation of a node in the linked list of waiters in the RUNNING state.
/// A waiter is stored on the stack of the waiting thread.
#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
struct Waiter {
thread: Cell<Option<Thread>>,
signaled: AtomicBool,
next: *const Waiter,
}
/// Drains and notifies the queue of waiters on drop.
struct Guard<'a> {
queue: &'a AtomicUsize,
new_queue: usize,
}
impl Drop for Guard<'_> {
fn drop(&mut self) {
let queue = self.queue.swap(self.new_queue, Ordering::AcqRel);
assert_eq!(queue & STATE_MASK, RUNNING);
unsafe {
let mut waiter = (queue & !STATE_MASK) as *const Waiter;
while !waiter.is_null() {
let next = (*waiter).next;
let thread = (*waiter).thread.take().unwrap();
(*waiter).signaled.store(true, Ordering::Release);
waiter = next;
thread.unpark();
}
}
}
}
// Corresponds to `std::sync::Once::call_inner`.
//
// Originally copied from std, but since modified to remove poisoning and to
// support wait.
//
// Note: this is intentionally monomorphic
#[inline(never)]
fn initialize_or_wait(queue: &AtomicUsize, mut init: Option<&mut dyn FnMut() -> bool>) {
let mut curr_queue = queue.load(Ordering::Acquire);
loop {
let curr_state = curr_queue & STATE_MASK;
match (curr_state, &mut init) {
(COMPLETE, _) => return,
(INCOMPLETE, Some(init)) => {
let exchange = queue.compare_exchange(
curr_queue,
(curr_queue & !STATE_MASK) | RUNNING,
Ordering::Acquire,
Ordering::Acquire,
);
if let Err(new_queue) = exchange {
curr_queue = new_queue;
continue;
}
let mut guard = Guard { queue, new_queue: INCOMPLETE };
if init() {
guard.new_queue = COMPLETE;
}
return;
}
(INCOMPLETE, None) | (RUNNING, _) => {
wait(&queue, curr_queue);
curr_queue = queue.load(Ordering::Acquire);
}
_ => debug_assert!(false),
}
}
}
fn wait(queue: &AtomicUsize, mut curr_queue: usize) {
let curr_state = curr_queue & STATE_MASK;
loop {
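// Build the waiter node on this thread's stack and try to CAS it in
// as the new list head, keeping the current state in the low bits.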
let node = Waiter {
thread: Cell::new(Some(thread::current())),
signaled: AtomicBool::new(false),
next: (curr_queue & !STATE_MASK) as *const Waiter,
};
let me = &node as *const Waiter as usize;
let exchange = queue.compare_exchange(
curr_queue,
me | curr_state,
Ordering::Release,
Ordering::Relaxed,
);
if let Err(new_queue) = exchange {
if new_queue & STATE_MASK != curr_state {
return;
}
curr_queue = new_queue;
continue;
}
while !node.signaled.load(Ordering::Acquire) {
thread::park();
}
break;
}
}
// These tests are snatched from std as well.
#[cfg(test)]
mod tests {
use std::panic;
use std::{sync::mpsc::channel, thread};
use super::OnceCell;
impl<T> OnceCell<T> {
fn init(&self, f: impl FnOnce() -> T) {
enum Void {}
let _ = self.initialize(|| Ok::<T, Void>(f()));
}
}
#[test]
fn smoke_once() {
static O: OnceCell<()> = OnceCell::new();
let mut a = 0;
O.init(|| a += 1);
assert_eq!(a, 1);
O.init(|| a += 1);
assert_eq!(a, 1);
}
#[test]
#[cfg(not(miri))]
fn stampede_once() {
static O: OnceCell<()> = OnceCell::new();
static mut RUN: bool = false;
let (tx, rx) = channel();
for _ in 0..10 {
let tx = tx.clone();
thread::spawn(move || {
for _ in 0..4 {
thread::yield_now()
}
unsafe {
O.init(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
tx.send(()).unwrap();
});
}
unsafe {
O.init(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
for _ in 0..10 {
rx.recv().unwrap();
}
}
#[test]
fn poison_bad() {
static O: OnceCell<()> = OnceCell::new();
// poison the once
let t = panic::catch_unwind(|| {
O.init(|| panic!());
});
assert!(t.is_err());
// we can subvert poisoning, however
let mut called = false;
O.init(|| {
called = true;
});
assert!(called);
// once any success happens, we stop propagating the poison
O.init(|| {});
}
#[test]
fn wait_for_force_to_finish() {
static O: OnceCell<()> = OnceCell::new();
// poison the once
let t = panic::catch_unwind(|| {
O.init(|| panic!());
});
assert!(t.is_err());
// make sure someone's waiting inside the once via a force
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
let t1 = thread::spawn(move || {
O.init(|| {
tx1.send(()).unwrap();
rx2.recv().unwrap();
});
});
rx1.recv().unwrap();
// put another waiter on the once
let t2 = thread::spawn(|| {
let mut called = false;
O.init(|| {
called = true;
});
assert!(!called);
});
tx2.send(()).unwrap();
assert!(t1.join().is_ok());
assert!(t2.join().is_ok());
}
#[test]
#[cfg(target_pointer_width = "64")]
fn test_size() {
use std::mem::size_of;
assert_eq!(size_of::<OnceCell<u32>>(), 4 * size_of::<u32>());
}
}
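
The `queue` field above is a tagged pointer: because `Waiter` is `#[repr(align(4))]`, the two low bits of any node address are zero, leaving room for the `INCOMPLETE`/`RUNNING`/`COMPLETE` state. A standalone sketch of that encoding, with a hypothetical `Node` type just to show the bit layout:

// Low two bits carry the state; the rest is a 4-byte-aligned pointer.
const STATE_MASK: usize = 0b11;
const RUNNING: usize = 0x1;

#[repr(align(4))]
struct Node(u32);

fn main() {
    let node = Node(0);
    let ptr = &node as *const Node as usize;
    let packed = ptr | RUNNING;               // tag the pointer with the state
    assert_eq!(packed & STATE_MASK, RUNNING); // recover the state
    assert_eq!(packed & !STATE_MASK, ptr);    // recover the pointer
}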

zeroidc/vendor/once_cell/src/lib.rs (vendored, new file, 1269 lines)

Diff suppressed: file too large to display.

zeroidc/vendor/once_cell/src/race.rs (vendored, new file, 303 lines)

@@ -0,0 +1,303 @@
//! Thread-safe, non-blocking, "first one wins" flavor of `OnceCell`.
//!
//! If two threads race to initialize a type from the `race` module, they
//! don't block; both may execute the initialization function, but only one of
//! them stores the result.
//!
//! This module does not require `std` feature.
//!
//! # Atomic orderings
//!
//! All types in this module use `Acquire` and `Release`
//! [atomic orderings](Ordering) for all their operations. While this is not
//! strictly necessary for types other than `OnceBox`, it is useful for users as
//! it allows them to be certain that after `get` or `get_or_init` returns on
//! one thread, any side-effects caused by the setter thread prior to them
//! calling `set` or `get_or_init` will be made visible to that thread; without
//! it, it's possible for it to appear as if they haven't happened yet from the
//! getter thread's perspective. This is an acceptable tradeoff to make since
//! `Acquire` and `Release` have very little performance overhead on most
//! architectures versus `Relaxed`.
#[cfg(feature = "atomic-polyfill")]
use atomic_polyfill as atomic;
#[cfg(not(feature = "atomic-polyfill"))]
use core::sync::atomic;
use atomic::{AtomicUsize, Ordering};
use core::num::NonZeroUsize;
/// A thread-safe cell which can be written to only once.
#[derive(Default, Debug)]
pub struct OnceNonZeroUsize {
inner: AtomicUsize,
}
impl OnceNonZeroUsize {
/// Creates a new empty cell.
#[inline]
pub const fn new() -> OnceNonZeroUsize {
OnceNonZeroUsize { inner: AtomicUsize::new(0) }
}
/// Gets the underlying value.
#[inline]
pub fn get(&self) -> Option<NonZeroUsize> {
let val = self.inner.load(Ordering::Acquire);
NonZeroUsize::new(val)
}
/// Sets the contents of this cell to `value`.
///
/// Returns `Ok(())` if the cell was empty and `Err(())` if it was
/// full.
#[inline]
pub fn set(&self, value: NonZeroUsize) -> Result<(), ()> {
let exchange =
self.inner.compare_exchange(0, value.get(), Ordering::AcqRel, Ordering::Acquire);
match exchange {
Ok(_) => Ok(()),
Err(_) => Err(()),
}
}
/// Gets the contents of the cell, initializing it with `f` if the cell was
/// empty.
///
/// If several threads concurrently run `get_or_init`, more than one `f` can
/// be called. However, all threads will return the same value, produced by
/// some `f`.
pub fn get_or_init<F>(&self, f: F) -> NonZeroUsize
where
F: FnOnce() -> NonZeroUsize,
{
enum Void {}
match self.get_or_try_init(|| Ok::<NonZeroUsize, Void>(f())) {
Ok(val) => val,
Err(void) => match void {},
}
}
/// Gets the contents of the cell, initializing it with `f` if
/// the cell was empty. If the cell was empty and `f` failed, an
/// error is returned.
///
/// If several threads concurrently run `get_or_init`, more than one `f` can
/// be called. However, all threads will return the same value, produced by
/// some `f`.
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<NonZeroUsize, E>
where
F: FnOnce() -> Result<NonZeroUsize, E>,
{
let val = self.inner.load(Ordering::Acquire);
let res = match NonZeroUsize::new(val) {
Some(it) => it,
None => {
let mut val = f()?.get();
let exchange =
self.inner.compare_exchange(0, val, Ordering::AcqRel, Ordering::Acquire);
if let Err(old) = exchange {
val = old;
}
unsafe { NonZeroUsize::new_unchecked(val) }
}
};
Ok(res)
}
}
/// A thread-safe cell which can be written to only once.
#[derive(Default, Debug)]
pub struct OnceBool {
inner: OnceNonZeroUsize,
}
impl OnceBool {
/// Creates a new empty cell.
#[inline]
pub const fn new() -> OnceBool {
OnceBool { inner: OnceNonZeroUsize::new() }
}
/// Gets the underlying value.
#[inline]
pub fn get(&self) -> Option<bool> {
self.inner.get().map(OnceBool::from_usize)
}
/// Sets the contents of this cell to `value`.
///
/// Returns `Ok(())` if the cell was empty and `Err(())` if it was
/// full.
#[inline]
pub fn set(&self, value: bool) -> Result<(), ()> {
self.inner.set(OnceBool::to_usize(value))
}
/// Gets the contents of the cell, initializing it with `f` if the cell was
/// empty.
///
/// If several threads concurrently run `get_or_init`, more than one `f` can
/// be called. However, all threads will return the same value, produced by
/// some `f`.
pub fn get_or_init<F>(&self, f: F) -> bool
where
F: FnOnce() -> bool,
{
OnceBool::from_usize(self.inner.get_or_init(|| OnceBool::to_usize(f())))
}
/// Gets the contents of the cell, initializing it with `f` if
/// the cell was empty. If the cell was empty and `f` failed, an
/// error is returned.
///
/// If several threads concurrently run `get_or_init`, more than one `f` can
/// be called. However, all threads will return the same value, produced by
/// some `f`.
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<bool, E>
where
F: FnOnce() -> Result<bool, E>,
{
self.inner.get_or_try_init(|| f().map(OnceBool::to_usize)).map(OnceBool::from_usize)
}
#[inline]
fn from_usize(value: NonZeroUsize) -> bool {
value.get() == 1
}
#[inline]
fn to_usize(value: bool) -> NonZeroUsize {
unsafe { NonZeroUsize::new_unchecked(if value { 1 } else { 2 }) }
}
}
#[cfg(feature = "alloc")]
pub use self::once_box::OnceBox;
#[cfg(feature = "alloc")]
mod once_box {
use super::atomic::{AtomicPtr, Ordering};
use core::{marker::PhantomData, ptr};
use alloc::boxed::Box;
/// A thread-safe cell which can be written to only once.
pub struct OnceBox<T> {
inner: AtomicPtr<T>,
ghost: PhantomData<Option<Box<T>>>,
}
impl<T> core::fmt::Debug for OnceBox<T> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "OnceBox({:?})", self.inner.load(Ordering::Relaxed))
}
}
impl<T> Default for OnceBox<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> Drop for OnceBox<T> {
fn drop(&mut self) {
let ptr = *self.inner.get_mut();
if !ptr.is_null() {
drop(unsafe { Box::from_raw(ptr) })
}
}
}
impl<T> OnceBox<T> {
/// Creates a new empty cell.
pub const fn new() -> OnceBox<T> {
OnceBox { inner: AtomicPtr::new(ptr::null_mut()), ghost: PhantomData }
}
/// Gets a reference to the underlying value.
pub fn get(&self) -> Option<&T> {
let ptr = self.inner.load(Ordering::Acquire);
if ptr.is_null() {
return None;
}
Some(unsafe { &*ptr })
}
/// Sets the contents of this cell to `value`.
///
/// Returns `Ok(())` if the cell was empty and `Err(value)` if it was
/// full.
pub fn set(&self, value: Box<T>) -> Result<(), Box<T>> {
let ptr = Box::into_raw(value);
let exchange = self.inner.compare_exchange(
ptr::null_mut(),
ptr,
Ordering::AcqRel,
Ordering::Acquire,
);
if let Err(_) = exchange {
let value = unsafe { Box::from_raw(ptr) };
return Err(value);
}
Ok(())
}
/// Gets the contents of the cell, initializing it with `f` if the cell was
/// empty.
///
/// If several threads concurrently run `get_or_init`, more than one `f` can
/// be called. However, all threads will return the same value, produced by
/// some `f`.
pub fn get_or_init<F>(&self, f: F) -> &T
where
F: FnOnce() -> Box<T>,
{
enum Void {}
match self.get_or_try_init(|| Ok::<Box<T>, Void>(f())) {
Ok(val) => val,
Err(void) => match void {},
}
}
/// Gets the contents of the cell, initializing it with `f` if
/// the cell was empty. If the cell was empty and `f` failed, an
/// error is returned.
///
/// If several threads concurrently run `get_or_init`, more than one `f` can
/// be called. However, all threads will return the same value, produced by
/// some `f`.
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
where
F: FnOnce() -> Result<Box<T>, E>,
{
let mut ptr = self.inner.load(Ordering::Acquire);
if ptr.is_null() {
let val = f()?;
ptr = Box::into_raw(val);
let exchange = self.inner.compare_exchange(
ptr::null_mut(),
ptr,
Ordering::AcqRel,
Ordering::Acquire,
);
if let Err(old) = exchange {
drop(unsafe { Box::from_raw(ptr) });
ptr = old;
}
};
Ok(unsafe { &*ptr })
}
}
unsafe impl<T: Sync + Send> Sync for OnceBox<T> {}
/// ```compile_fail
/// struct S(*mut ());
/// unsafe impl Sync for S {}
///
/// fn share<T: Sync>(_: &T) {}
/// share(&once_cell::race::OnceBox::<S>::new());
/// ```
fn _dummy() {}
}