Mirror of https://github.com/bspeice/qadapt (synced 2024-11-22 05:48:13 -05:00)
Rustfmt
This commit is contained in:
parent 37deb8704a
commit 6560ad2a94
@@ -10,9 +10,9 @@
 extern crate std;
 
 pub use mutex::*;
-pub use rw_lock::*;
 pub use once::*;
+pub use rw_lock::*;
 
 mod mutex;
-mod rw_lock;
 mod once;
+mod rw_lock;
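The hunk above touches the crate root; the hunks that follow apply the same mechanical rustfmt changes to the mutex, once, and rw_lock modules: `use`/`mod` items are sorted, opening braces move onto the item's line, and long expressions are rewrapped. A minimal before/after sketch of the brace change, using a hypothetical struct purely for illustration (not part of the diff):

// Style used before this commit (hypothetical item):
struct ExampleBefore<T>
{
    value: T,
}

// Equivalent item in the style rustfmt produces in this commit:
struct ExampleAfter<T> {
    value: T,
}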
@@ -1,10 +1,10 @@
-use core::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, spin_loop_hint as cpu_relax};
 use core::cell::UnsafeCell;
-use core::marker::Sync;
-use core::ops::{Drop, Deref, DerefMut};
-use core::fmt;
-use core::option::Option::{self, None, Some};
 use core::default::Default;
+use core::fmt;
+use core::marker::Sync;
+use core::ops::{Deref, DerefMut, Drop};
+use core::option::Option::{self, None, Some};
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicBool, Ordering, ATOMIC_BOOL_INIT};
 
 /// This type provides MUTual EXclusion based on spinning.
 ///
@@ -69,8 +69,7 @@ use core::default::Default;
 /// let answer = { *spin_mutex.lock() };
 /// assert_eq!(answer, numthreads);
 /// ```
-pub struct Mutex<T: ?Sized>
-{
+pub struct Mutex<T: ?Sized> {
     lock: AtomicBool,
     data: UnsafeCell<T>,
 }
@@ -79,8 +78,7 @@ pub struct Mutex<T: ?Sized>
 ///
 /// When the guard falls out of scope it will release the lock.
 #[derive(Debug)]
-pub struct MutexGuard<'a, T: ?Sized + 'a>
-{
+pub struct MutexGuard<'a, T: ?Sized + 'a> {
     lock: &'a AtomicBool,
     data: &'a mut T,
 }
@@ -89,8 +87,7 @@ pub struct MutexGuard<'a, T: ?Sized + 'a>
 unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
 unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
 
-impl<T> Mutex<T>
-{
+impl<T> Mutex<T> {
     /// Creates a new spinlock wrapping the supplied data.
     ///
     /// May be used statically:
@@ -106,10 +103,8 @@ impl<T> Mutex<T>
     /// drop(lock);
     /// }
     /// ```
-    pub const fn new(user_data: T) -> Mutex<T>
-    {
-        Mutex
-        {
+    pub const fn new(user_data: T) -> Mutex<T> {
+        Mutex {
             lock: ATOMIC_BOOL_INIT,
             data: UnsafeCell::new(user_data),
         }
@@ -124,15 +119,11 @@ impl<T> Mutex<T>
     }
 }
 
-impl<T: ?Sized> Mutex<T>
-{
-    fn obtain_lock(&self)
-    {
-        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false
-        {
+impl<T: ?Sized> Mutex<T> {
+    fn obtain_lock(&self) {
+        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
             // Wait until the lock looks unlocked before retrying
-            while self.lock.load(Ordering::Relaxed)
-            {
+            while self.lock.load(Ordering::Relaxed) {
                 cpu_relax();
             }
         }
@@ -153,11 +144,9 @@ impl<T: ?Sized> Mutex<T>
     /// }
     ///
     /// ```
-    pub fn lock(&self) -> MutexGuard<T>
-    {
+    pub fn lock(&self) -> MutexGuard<T> {
         self.obtain_lock();
-        MutexGuard
-        {
+        MutexGuard {
             lock: &self.lock,
             data: unsafe { &mut *self.data.get() },
         }
@@ -176,30 +165,21 @@ impl<T: ?Sized> Mutex<T>
 
     /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
     /// a guard within Some.
-    pub fn try_lock(&self) -> Option<MutexGuard<T>>
-    {
-        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
-        {
-            Some(
-                MutexGuard {
+    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
+        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
+            Some(MutexGuard {
                 lock: &self.lock,
                 data: unsafe { &mut *self.data.get() },
-                }
-            )
-        }
-        else
-        {
+            })
+        } else {
             None
         }
     }
 }
 
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T>
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
-    {
-        match self.try_lock()
-        {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
             Some(guard) => write!(f, "Mutex {{ data: ")
                 .and_then(|()| (&*guard).fmt(f))
                 .and_then(|()| write!(f, "}}")),
@@ -214,22 +194,22 @@ impl<T: ?Sized + Default> Default for Mutex<T> {
     }
 }
 
-impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>
-{
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
     type Target = T;
-    fn deref<'b>(&'b self) -> &'b T { &*self.data }
+    fn deref<'b>(&'b self) -> &'b T {
+        &*self.data
+    }
 }
 
-impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>
-{
-    fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data }
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
+    fn deref_mut<'b>(&'b mut self) -> &'b mut T {
+        &mut *self.data
+    }
 }
 
-impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
-{
+impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
     /// The dropping of the MutexGuard will release the lock it was created from.
-    fn drop(&mut self)
-    {
+    fn drop(&mut self) {
         self.lock.store(false, Ordering::Release);
     }
 }
@@ -238,9 +218,9 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
 mod tests {
     use std::prelude::v1::*;
 
+    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
-    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::thread;
 
     use super::*;
@@ -274,9 +254,15 @@ mod tests {
         let (tx, rx) = channel();
         for _ in 0..K {
             let tx2 = tx.clone();
-            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
             let tx2 = tx.clone();
-            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
         }
 
         drop(tx);
@@ -359,7 +345,8 @@ mod tests {
             }
             let _u = Unwinder { i: arc2 };
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.lock();
         assert_eq!(*lock, 2);
     }
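The hunks above are from the spinning mutex module. Its public surface, visible in the diff, is `Mutex::new` (a `const fn`, so it can be used in statics), `lock`, and `try_lock`, with a `MutexGuard` that releases the lock when dropped. A minimal usage sketch, assuming the crate is consumed as `spin` (this repository vendors a spin-style crate; substitute its actual crate name):

use spin::Mutex; // assumed path; the diff shows only the module itself

fn main() {
    // `new` is const, so the mutex may be used statically.
    static COUNTER: Mutex<u32> = Mutex::new(0);

    {
        // `lock` spins until the flag is clear and returns a guard.
        let mut guard = COUNTER.lock();
        *guard += 1;
    } // dropping the guard stores `false` into the lock flag

    // `try_lock` never spins: it returns a guard only if the lock was free.
    if let Some(guard) = COUNTER.try_lock() {
        assert_eq!(*guard, 1);
    }
}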
@@ -1,6 +1,6 @@
 use core::cell::UnsafeCell;
-use core::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint as cpu_relax};
 use core::fmt;
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};
 
 /// A synchronization primitive which can be used to run a one-time global
 /// initialization. Unlike its std equivalent, this is generalized so that The
@@ -29,7 +29,7 @@ impl<T: fmt::Debug> fmt::Debug for Once<T> {
             Some(s) => write!(f, "Once {{ data: ")
                 .and_then(|()| s.fmt(f))
                 .and_then(|()| write!(f, "}}")),
-            None => write!(f, "Once {{ <uninitialized> }}")
+            None => write!(f, "Once {{ <uninitialized> }}"),
         }
     }
 }
@@ -96,17 +96,22 @@ impl<T> Once<T> {
     /// }
     /// ```
     pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T
-        where F: FnOnce() -> T
+    where
+        F: FnOnce() -> T,
     {
         let mut status = self.state.load(Ordering::SeqCst);
 
         if status == INCOMPLETE {
-            status = self.state.compare_and_swap(INCOMPLETE,
-                                                 RUNNING,
-                                                 Ordering::SeqCst);
-            if status == INCOMPLETE { // We init
+            status = self
+                .state
+                .compare_and_swap(INCOMPLETE, RUNNING, Ordering::SeqCst);
+            if status == INCOMPLETE {
+                // We init
                 // We use a guard (Finish) to catch panics caused by builder
-                let mut finish = Finish { state: &self.state, panicked: true };
+                let mut finish = Finish {
+                    state: &self.state,
+                    panicked: true,
+                };
                 unsafe { *self.data.get() = Some(builder()) };
                 finish.panicked = false;
 
@@ -121,10 +126,11 @@ impl<T> Once<T> {
         loop {
             match status {
                 INCOMPLETE => unreachable!(),
-                RUNNING => { // We spin
+                RUNNING => {
+                    // We spin
                     cpu_relax();
                     status = self.state.load(Ordering::SeqCst)
-                },
+                }
                 PANICKED => panic!("Once has panicked"),
                 COMPLETE => return self.force_get(),
                 _ => unsafe { unreachable() },
@@ -172,9 +178,9 @@ impl<'a> Drop for Finish<'a> {
 mod tests {
     use std::prelude::v1::*;
 
-    use super::Once;
     use std::sync::mpsc::channel;
     use std::thread;
+    use super::Once;
 
     #[test]
     fn smoke_once() {
@@ -204,7 +210,9 @@ mod tests {
         for _ in 0..10 {
             let tx = tx.clone();
             thread::spawn(move || {
-                for _ in 0..4 { thread::yield_now() }
+                for _ in 0..4 {
+                    thread::yield_now()
+                }
                 unsafe {
                     O.call_once(|| {
                         assert!(!RUN);
@@ -249,7 +257,6 @@ mod tests {
         assert!(INIT.try().is_none());
     }
 
-
     #[test]
     fn wait() {
         static INIT: Once<usize> = Once::new();
@@ -261,7 +268,7 @@ mod tests {
 
     #[test]
     fn panic() {
-        use ::std::panic;
+        use std::panic;
 
         static INIT: Once<()> = Once::new();
 
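The hunks above are from the `Once` module: `call_once` runs the supplied closure at most once and hands every caller a reference to the single stored value, spinning if another thread is mid-initialization. A minimal sketch under the same crate-path assumption:

use spin::Once; // assumed path

static CONFIG: Once<u32> = Once::new();

fn expensive_init() -> u32 {
    // stand-in for a one-time computation
    40 + 2
}

fn main() {
    // The first call runs the initializer and stores the result.
    assert_eq!(*CONFIG.call_once(expensive_init), 42);
    // Later calls skip the closure and return the stored value.
    assert_eq!(*CONFIG.call_once(|| 0), 42);
}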
@@ -1,8 +1,8 @@
-use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT, spin_loop_hint as cpu_relax};
 use core::cell::UnsafeCell;
-use core::ops::{Deref, DerefMut};
-use core::fmt;
 use core::default::Default;
+use core::fmt;
+use core::ops::{Deref, DerefMut};
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
 
 /// A reader-writer lock
 ///
@@ -42,8 +42,7 @@ use core::default::Default;
 /// assert_eq!(*w, 6);
 /// } // write lock is dropped here
 /// ```
-pub struct RwLock<T: ?Sized>
-{
+pub struct RwLock<T: ?Sized> {
     lock: AtomicUsize,
     data: UnsafeCell<T>,
 }
@@ -53,8 +52,7 @@ pub struct RwLock<T: ?Sized>
 /// When the guard falls out of scope it will decrement the read count,
 /// potentially releasing the lock.
 #[derive(Debug)]
-pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
-{
+pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
     lock: &'a AtomicUsize,
     data: &'a T,
 }
@@ -63,8 +61,7 @@ pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
 ///
 /// When the guard falls out of scope it will release the lock.
 #[derive(Debug)]
-pub struct RwLockWriteGuard<'a, T: 'a + ?Sized>
-{
+pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
     lock: &'a AtomicUsize,
     data: &'a mut T,
 }
@@ -75,8 +72,7 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
 
 const USIZE_MSB: usize = ::core::isize::MIN as usize;
 
-impl<T> RwLock<T>
-{
+impl<T> RwLock<T> {
     /// Creates a new spinlock wrapping the supplied data.
     ///
     /// May be used statically:
@@ -93,18 +89,15 @@ impl<T> RwLock<T>
     /// }
     /// ```
     #[inline]
-    pub const fn new(user_data: T) -> RwLock<T>
-    {
-        RwLock
-        {
+    pub const fn new(user_data: T) -> RwLock<T> {
+        RwLock {
            lock: ATOMIC_USIZE_INIT,
            data: UnsafeCell::new(user_data),
        }
    }
 
     /// Consumes this `RwLock`, returning the underlying data.
-    pub fn into_inner(self) -> T
-    {
+    pub fn into_inner(self) -> T {
         // We know statically that there are no outstanding references to
         // `self` so there's no need to lock.
         let RwLock { data, .. } = self;
@@ -112,8 +105,7 @@ impl<T> RwLock<T>
     }
 }
 
-impl<T: ?Sized> RwLock<T>
-{
+impl<T: ?Sized> RwLock<T> {
     /// Locks this rwlock with shared read access, blocking the current thread
     /// until it can be acquired.
     ///
@@ -136,8 +128,7 @@ impl<T: ?Sized> RwLock<T>
     /// }
     /// ```
     #[inline]
-    pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T>
-    {
+    pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T> {
         // (funny do-while loop)
         while {
             // Old value, with write bit unset
@@ -191,17 +182,13 @@ impl<T: ?Sized> RwLock<T>
     /// }
     /// ```
     #[inline]
-    pub fn try_read(&self) -> Option<RwLockReadGuard<T>>
-    {
+    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
         // Old value, with write bit unset
         let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
 
         let new = old + 1;
         debug_assert!(new != (!USIZE_MSB) & (!0));
-        if self.lock.compare_and_swap(old,
-                                      new,
-                                      Ordering::SeqCst) == old
-        {
+        if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
             Some(RwLockReadGuard {
                 lock: &self.lock,
                 data: unsafe { &*self.data.get() },
@@ -251,23 +238,18 @@ impl<T: ?Sized> RwLock<T>
     /// }
     /// ```
     #[inline]
-    pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T>
-    {
-        loop
-        {
+    pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T> {
+        loop {
             // Old value, with write bit unset.
             let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
             // Old value, with write bit set.
             let new = USIZE_MSB | old;
-            if self.lock.compare_and_swap(old,
-                                          new,
-                                          Ordering::SeqCst) == old
-            {
+            if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
                 // Wait for readers to go away, then lock is ours.
                 while self.lock.load(Ordering::Relaxed) != USIZE_MSB {
                     cpu_relax();
                 }
-                break
+                break;
             }
         }
         RwLockWriteGuard {
@@ -296,12 +278,8 @@ impl<T: ?Sized> RwLock<T>
     /// }
     /// ```
     #[inline]
-    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>>
-    {
-        if self.lock.compare_and_swap(0,
-                                      USIZE_MSB,
-                                      Ordering::SeqCst) == 0
-        {
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
+        if self.lock.compare_and_swap(0, USIZE_MSB, Ordering::SeqCst) == 0 {
             Some(RwLockWriteGuard {
                 lock: &self.lock,
                 data: unsafe { &mut *self.data.get() },
@@ -312,12 +290,9 @@ impl<T: ?Sized> RwLock<T>
     }
 }
 
-impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T>
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
-    {
-        match self.try_read()
-        {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_read() {
             Some(guard) => write!(f, "RwLock {{ data: ")
                 .and_then(|()| (&*guard).fmt(f))
                 .and_then(|()| write!(f, "}}")),
@@ -335,17 +310,23 @@ impl<T: ?Sized + Default> Default for RwLock<T> {
 impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
     type Target = T;
 
-    fn deref(&self) -> &T { self.data }
+    fn deref(&self) -> &T {
+        self.data
+    }
 }
 
 impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
     type Target = T;
 
-    fn deref(&self) -> &T { self.data }
+    fn deref(&self) -> &T {
+        self.data
+    }
 }
 
 impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
-    fn deref_mut(&mut self) -> &mut T { self.data }
+    fn deref_mut(&mut self) -> &mut T {
+        self.data
+    }
 }
 
 impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
@@ -366,9 +347,9 @@ impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
 mod tests {
     use std::prelude::v1::*;
 
-    use std::sync::Arc;
-    use std::sync::mpsc::channel;
     use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
     use std::thread;
 
     use super::*;
@@ -466,7 +447,8 @@ mod tests {
             }
             let _u = Unwinder { i: arc2 };
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.read();
         assert_eq!(*lock, 2);
     }
@@ -493,7 +475,10 @@ mod tests {
         let write_result = lock.try_write();
         match write_result {
             None => (),
-            Some(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
+            Some(_) => assert!(
+                false,
+                "try_write should not succeed while read_guard is in scope"
+            ),
         }
 
         drop(read_guard);
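The final group of hunks is from the reader-writer lock: `read` takes a shared lock that any number of readers may hold, `write` takes an exclusive one, and `try_read`/`try_write` are the non-blocking variants exercised in the tests above. A minimal sketch, with the same caveat about the crate path:

use spin::RwLock; // assumed path

fn main() {
    let lock = RwLock::new(5);

    {
        // Any number of read guards may be held at the same time.
        let r1 = lock.read();
        let r2 = lock.read();
        assert_eq!(*r1 + *r2, 10);
        // An exclusive lock cannot be taken while readers exist.
        assert!(lock.try_write().is_none());
    } // read guards dropped here, releasing the shared lock

    {
        // The write guard gives exclusive, mutable access.
        let mut w = lock.write();
        *w += 1;
        assert_eq!(*w, 6);
    } // write lock released here
}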