diff --git a/qadapt-spin/src/lib.rs b/qadapt-spin/src/lib.rs
index 6e876bf..8901dcc 100644
--- a/qadapt-spin/src/lib.rs
+++ b/qadapt-spin/src/lib.rs
@@ -10,9 +10,9 @@ extern crate std;
 
 pub use mutex::*;
-pub use rw_lock::*;
 pub use once::*;
+pub use rw_lock::*;
 
 mod mutex;
-mod rw_lock;
 mod once;
+mod rw_lock;
diff --git a/qadapt-spin/src/mutex.rs b/qadapt-spin/src/mutex.rs
index fa5c48d..ce206f4 100644
--- a/qadapt-spin/src/mutex.rs
+++ b/qadapt-spin/src/mutex.rs
@@ -1,10 +1,10 @@
-use core::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, spin_loop_hint as cpu_relax};
 use core::cell::UnsafeCell;
-use core::marker::Sync;
-use core::ops::{Drop, Deref, DerefMut};
-use core::fmt;
-use core::option::Option::{self, None, Some};
 use core::default::Default;
+use core::fmt;
+use core::marker::Sync;
+use core::ops::{Deref, DerefMut, Drop};
+use core::option::Option::{self, None, Some};
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicBool, Ordering, ATOMIC_BOOL_INIT};
 
 /// This type provides MUTual EXclusion based on spinning.
 ///
@@ -69,8 +69,7 @@ use core::default::Default;
 /// let answer = { *spin_mutex.lock() };
 /// assert_eq!(answer, numthreads);
 /// ```
-pub struct Mutex<T: ?Sized>
-{
+pub struct Mutex<T: ?Sized> {
     lock: AtomicBool,
     data: UnsafeCell<T>,
 }
@@ -79,8 +78,7 @@ pub struct Mutex<T: ?Sized>
 ///
 /// When the guard falls out of scope it will release the lock.
 #[derive(Debug)]
-pub struct MutexGuard<'a, T: ?Sized + 'a>
-{
+pub struct MutexGuard<'a, T: ?Sized + 'a> {
     lock: &'a AtomicBool,
     data: &'a mut T,
 }
@@ -89,8 +87,7 @@ unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
 unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
 
-impl<T> Mutex<T>
-{
+impl<T> Mutex<T> {
     /// Creates a new spinlock wrapping the supplied data.
     ///
     /// May be used statically:
     ///
@@ -106,10 +103,8 @@ impl<T> Mutex<T>
     ///     drop(lock);
     /// }
     /// ```
-    pub const fn new(user_data: T) -> Mutex<T>
-    {
-        Mutex
-        {
+    pub const fn new(user_data: T) -> Mutex<T> {
+        Mutex {
             lock: ATOMIC_BOOL_INIT,
             data: UnsafeCell::new(user_data),
         }
@@ -124,15 +119,11 @@ impl<T> Mutex<T>
 }
 
-impl<T: ?Sized> Mutex<T>
-{
-    fn obtain_lock(&self)
-    {
-        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false
-        {
+impl<T: ?Sized> Mutex<T> {
+    fn obtain_lock(&self) {
+        while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
             // Wait until the lock looks unlocked before retrying
-            while self.lock.load(Ordering::Relaxed)
-            {
+            while self.lock.load(Ordering::Relaxed) {
                 cpu_relax();
             }
         }
     }
@@ -153,11 +144,9 @@ impl<T: ?Sized> Mutex<T>
     /// }
     ///
     /// ```
-    pub fn lock(&self) -> MutexGuard<T>
-    {
+    pub fn lock(&self) -> MutexGuard<T> {
         self.obtain_lock();
-        MutexGuard
-        {
+        MutexGuard {
             lock: &self.lock,
             data: unsafe { &mut *self.data.get() },
         }
     }
@@ -176,33 +165,24 @@ impl<T: ?Sized> Mutex<T>
     /// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
     /// a guard within Some.
-    pub fn try_lock(&self) -> Option<MutexGuard<T>>
-    {
-        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
-        {
-            Some(
-                MutexGuard {
-                    lock: &self.lock,
-                    data: unsafe { &mut *self.data.get() },
-                }
-            )
-        }
-        else
-        {
+    pub fn try_lock(&self) -> Option<MutexGuard<T>> {
+        if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
+            Some(MutexGuard {
+                lock: &self.lock,
+                data: unsafe { &mut *self.data.get() },
+            })
+        } else {
             None
         }
     }
 }
 
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T>
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
-    {
-        match self.try_lock()
-        {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_lock() {
             Some(guard) => write!(f, "Mutex {{ data: ")
-                               .and_then(|()| (&*guard).fmt(f))
-                               .and_then(|()| write!(f, "}}")),
+                .and_then(|()| (&*guard).fmt(f))
+                .and_then(|()| write!(f, "}}")),
             None => write!(f, "Mutex {{ }}"),
         }
     }
 }
@@ -214,22 +194,22 @@ impl<T: ?Sized + Default> Default for Mutex<T> {
     }
 }
 
-impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>
-{
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
     type Target = T;
-    fn deref<'b>(&'b self) -> &'b T { &*self.data }
+    fn deref<'b>(&'b self) -> &'b T {
+        &*self.data
+    }
 }
 
-impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>
-{
-    fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data }
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
+    fn deref_mut<'b>(&'b mut self) -> &'b mut T {
+        &mut *self.data
+    }
 }
 
-impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
-{
+impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
     /// The dropping of the MutexGuard will release the lock it was created from.
-    fn drop(&mut self)
-    {
+    fn drop(&mut self) {
         self.lock.store(false, Ordering::Release);
     }
 }
@@ -238,9 +218,9 @@
 mod tests {
     use std::prelude::v1::*;
 
+    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::mpsc::channel;
     use std::sync::Arc;
-    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::thread;
 
     use super::*;
@@ -257,7 +237,7 @@
     #[test]
     fn lots_and_lots() {
-        static M: Mutex<()>  = Mutex::new(());
+        static M: Mutex<()> = Mutex::new(());
         static mut CNT: u32 = 0;
         const J: u32 = 1000;
         const K: u32 = 3;
@@ -274,16 +254,22 @@
         let (tx, rx) = channel();
         for _ in 0..K {
             let tx2 = tx.clone();
-            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
             let tx2 = tx.clone();
-            thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
+            thread::spawn(move || {
+                inc();
+                tx2.send(()).unwrap();
+            });
         }
 
         drop(tx);
         for _ in 0..2 * K {
             rx.recv().unwrap();
         }
-        assert_eq!(unsafe {CNT}, J * K * 2);
+        assert_eq!(unsafe { CNT }, J * K * 2);
     }
 
     #[test]
@@ -335,7 +321,7 @@
         let arc = Arc::new(Mutex::new(1));
         let arc2 = Arc::new(Mutex::new(arc));
         let (tx, rx) = channel();
-        let _t = thread::spawn(move|| {
+        let _t = thread::spawn(move || {
             let lock = arc2.lock();
             let lock2 = lock.lock();
             assert_eq!(*lock2, 1);
@@ -348,7 +334,7 @@
     fn test_mutex_arc_access_in_unwind() {
         let arc = Arc::new(Mutex::new(1));
         let arc2 = arc.clone();
-        let _ = thread::spawn(move|| -> () {
+        let _ = thread::spawn(move || -> () {
             struct Unwinder {
                 i: Arc<Mutex<i32>>,
             }
@@ -359,7 +345,8 @@
             impl Drop for Unwinder {
                 fn drop(&mut self) {
                     *self.i.lock() += 1;
                 }
             }
             let _u = Unwinder { i: arc2 };
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.lock();
         assert_eq!(*lock, 2);
     }
@@ -382,7 +369,7 @@
         ::std::mem::forget(lock.lock());
         unsafe {
             lock.force_unlock();
-        } 
+        }
         assert!(lock.try_lock().is_some());
     }
 }
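
The mutex changes above are purely mechanical rustfmt reflows (brace placement, closure spacing, import ordering); the locking logic is untouched. As a reference point, here is a minimal usage sketch of this spin mutex. The `qadapt_spin` crate name is an assumption derived from the `qadapt-spin` directory, and the example mirrors the doc comments in the diff rather than a verified build:

```rust
use qadapt_spin::Mutex;

// `Mutex::new` is a `const fn`, so the lock can live in a static.
static COUNTER: Mutex<u32> = Mutex::new(0);

fn main() {
    {
        // `lock()` spins (compare_and_swap plus cpu_relax) until the flag
        // clears, then returns a guard borrowing the protected data.
        let mut guard = COUNTER.lock();
        *guard += 1;
    } // guard dropped here; its Drop impl stores `false` with Release ordering

    // `try_lock()` is the non-blocking variant: Some(guard) only if the
    // single compare_and_swap wins.
    if let Some(guard) = COUNTER.try_lock() {
        assert_eq!(*guard, 1);
    }
}
```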
diff --git a/qadapt-spin/src/once.rs b/qadapt-spin/src/once.rs
index fc247c6..85370e7 100644
--- a/qadapt-spin/src/once.rs
+++ b/qadapt-spin/src/once.rs
@@ -1,6 +1,6 @@
 use core::cell::UnsafeCell;
-use core::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint as cpu_relax};
 use core::fmt;
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};
 
 /// A synchronization primitive which can be used to run a one-time global
 /// initialization. Unlike its std equivalent, this is generalized so that The
@@ -27,9 +27,9 @@ impl<T: fmt::Debug> fmt::Debug for Once<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match self.try() {
             Some(s) => write!(f, "Once {{ data: ")
-                           .and_then(|()| s.fmt(f))
-                           .and_then(|()| write!(f, "}}")),
-            None => write!(f, "Once {{ }}")
+                .and_then(|()| s.fmt(f))
+                .and_then(|()| write!(f, "}}")),
+            None => write!(f, "Once {{ }}"),
         }
     }
 }
@@ -62,7 +62,7 @@
     fn force_get<'a>(&'a self) -> &'a T {
         match unsafe { &*self.data.get() }.as_ref() {
-            None    => unsafe { unreachable() },
+            None => unsafe { unreachable() },
             Some(p) => p,
         }
     }
@@ -96,17 +96,22 @@
     /// }
     /// ```
     pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T
-        where F: FnOnce() -> T
+    where
+        F: FnOnce() -> T,
     {
         let mut status = self.state.load(Ordering::SeqCst);
 
         if status == INCOMPLETE {
-            status = self.state.compare_and_swap(INCOMPLETE,
-                                                 RUNNING,
-                                                 Ordering::SeqCst);
-            if status == INCOMPLETE { // We init
+            status = self
+                .state
+                .compare_and_swap(INCOMPLETE, RUNNING, Ordering::SeqCst);
+            if status == INCOMPLETE {
+                // We init
                 // We use a guard (Finish) to catch panics caused by builder
-                let mut finish = Finish { state: &self.state, panicked: true };
+                let mut finish = Finish {
+                    state: &self.state,
+                    panicked: true,
+                };
                 unsafe { *self.data.get() = Some(builder()) };
                 finish.panicked = false;
@@ -121,10 +126,11 @@
         loop {
             match status {
                 INCOMPLETE => unreachable!(),
-                RUNNING => { // We spin
+                RUNNING => {
+                    // We spin
                     cpu_relax();
                     status = self.state.load(Ordering::SeqCst)
-                },
+                }
                 PANICKED => panic!("Once has panicked"),
                 COMPLETE => return self.force_get(),
                 _ => unsafe { unreachable() },
@@ -136,7 +142,7 @@
     pub fn try<'a>(&'a self) -> Option<&'a T> {
         match self.state.load(Ordering::SeqCst) {
             COMPLETE => Some(self.force_get()),
-            _        => None,
+            _ => None,
         }
     }
@@ -146,9 +152,9 @@
         loop {
             match self.state.load(Ordering::SeqCst) {
                 INCOMPLETE => return None,
-                RUNNING    => cpu_relax(), // We spin
-                COMPLETE   => return Some(self.force_get()),
-                PANICKED   => panic!("Once has panicked"),
+                RUNNING => cpu_relax(), // We spin
+                COMPLETE => return Some(self.force_get()),
+                PANICKED => panic!("Once has panicked"),
                 _ => unsafe { unreachable() },
             }
         }
@@ -172,9 +178,9 @@ impl<'a> Drop for Finish<'a> {
 mod tests {
     use std::prelude::v1::*;
 
+    use super::Once;
     use std::sync::mpsc::channel;
     use std::thread;
-    use super::Once;
 
     #[test]
     fn smoke_once() {
@@ -203,8 +209,10 @@
         let (tx, rx) = channel();
         for _ in 0..10 {
             let tx = tx.clone();
-            thread::spawn(move|| {
-                for _ in 0..4 { thread::yield_now() }
+            thread::spawn(move || {
+                for _ in 0..4 {
+                    thread::yield_now()
+                }
                 unsafe {
                     O.call_once(|| {
                         assert!(!RUN);
@@ -243,13 +251,12 @@
         static INIT: Once<usize> = Once::new();
 
         assert!(INIT.try().is_none());
-        thread::spawn(move|| {
-            INIT.call_once(|| loop { });
+        thread::spawn(move || {
+            INIT.call_once(|| loop {});
         });
         assert!(INIT.try().is_none());
     }
 
-
     #[test]
     fn wait() {
         static INIT: Once<usize> = Once::new();
@@ -261,7 +268,7 @@
 
     #[test]
     fn panic() {
-        use ::std::panic;
+        use std::panic;
 
         static INIT: Once<()> = Once::new();
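
The `Once` diff is likewise formatting-only; the four-state machine (`INCOMPLETE` → `RUNNING` → `COMPLETE`, with `PANICKED` as the failure state) that `call_once` drives is unchanged. A minimal sketch of how it behaves, under the same assumed `qadapt_spin` crate name. Note that `try` is an ordinary method name here: the vendored code predates the 2018 edition, which reserved that keyword:

```rust
use qadapt_spin::Once;

static VALUE: Once<u32> = Once::new();

fn main() {
    // The first caller moves the state INCOMPLETE -> RUNNING, runs the
    // closure, stores the result, and marks COMPLETE. Concurrent callers
    // spin on cpu_relax() until then; every caller gets the same &u32.
    let v = VALUE.call_once(|| 42);
    assert_eq!(*v, 42);

    // `try()` never blocks: Some(&T) once COMPLETE, None before that.
    assert_eq!(VALUE.try(), Some(&42));
}
```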
diff --git a/qadapt-spin/src/rw_lock.rs b/qadapt-spin/src/rw_lock.rs
index bea8741..0f4cdbe 100644
--- a/qadapt-spin/src/rw_lock.rs
+++ b/qadapt-spin/src/rw_lock.rs
@@ -1,8 +1,8 @@
-use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT, spin_loop_hint as cpu_relax};
 use core::cell::UnsafeCell;
-use core::ops::{Deref, DerefMut};
-use core::fmt;
 use core::default::Default;
+use core::fmt;
+use core::ops::{Deref, DerefMut};
+use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
 
 /// A reader-writer lock
 ///
@@ -42,8 +42,7 @@ use core::default::Default;
 /// assert_eq!(*w, 6);
 /// } // write lock is dropped here
 /// ```
-pub struct RwLock<T: ?Sized>
-{
+pub struct RwLock<T: ?Sized> {
     lock: AtomicUsize,
     data: UnsafeCell<T>,
 }
@@ -53,8 +52,7 @@ pub struct RwLock<T: ?Sized>
 /// When the guard falls out of scope it will decrement the read count,
 /// potentially releasing the lock.
 #[derive(Debug)]
-pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
-{
+pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
     lock: &'a AtomicUsize,
     data: &'a T,
 }
@@ -63,8 +61,7 @@ pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
 ///
 /// When the guard falls out of scope it will release the lock.
 #[derive(Debug)]
-pub struct RwLockWriteGuard<'a, T: 'a + ?Sized>
-{
+pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
     lock: &'a AtomicUsize,
     data: &'a mut T,
 }
@@ -75,8 +72,7 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
 
 const USIZE_MSB: usize = ::core::isize::MIN as usize;
 
-impl<T> RwLock<T>
-{
+impl<T> RwLock<T> {
     /// Creates a new spinlock wrapping the supplied data.
     ///
     /// May be used statically:
@@ -93,18 +89,15 @@ impl<T> RwLock<T>
     /// }
     /// ```
     #[inline]
-    pub const fn new(user_data: T) -> RwLock<T>
-    {
-        RwLock
-        {
+    pub const fn new(user_data: T) -> RwLock<T> {
+        RwLock {
             lock: ATOMIC_USIZE_INIT,
             data: UnsafeCell::new(user_data),
         }
     }
 
     /// Consumes this `RwLock`, returning the underlying data.
-    pub fn into_inner(self) -> T
-    {
+    pub fn into_inner(self) -> T {
         // We know statically that there are no outstanding references to
         // `self` so there's no need to lock.
         let RwLock { data, .. } = self;
@@ -112,8 +105,7 @@
         data.into_inner()
     }
 }
 
-impl<T: ?Sized> RwLock<T>
-{
+impl<T: ?Sized> RwLock<T> {
     /// Locks this rwlock with shared read access, blocking the current thread
     /// until it can be acquired.
@@ -136,8 +128,7 @@
     /// }
     /// ```
     #[inline]
-    pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T>
-    {
+    pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T> {
         // (funny do-while loop)
         while {
             // Old value, with write bit unset
@@ -164,7 +155,7 @@
         }
         RwLockReadGuard {
             lock: &self.lock,
-            data: unsafe { & *self.data.get() },
+            data: unsafe { &*self.data.get() },
         }
     }
@@ -191,20 +182,16 @@
     /// }
     /// ```
     #[inline]
-    pub fn try_read(&self) -> Option<RwLockReadGuard<T>>
-    {
+    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
         // Old value, with write bit unset
         let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
 
         let new = old + 1;
         debug_assert!(new != (!USIZE_MSB) & (!0));
-        if self.lock.compare_and_swap(old,
-                                      new,
-                                      Ordering::SeqCst) == old
-        {
+        if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
             Some(RwLockReadGuard {
                 lock: &self.lock,
-                data: unsafe { & *self.data.get() },
+                data: unsafe { &*self.data.get() },
             })
         } else {
             None
@@ -251,23 +238,18 @@
     /// }
     /// ```
     #[inline]
-    pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T>
-    {
-        loop
-        {
+    pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T> {
+        loop {
             // Old value, with write bit unset.
             let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
             // Old value, with write bit set.
             let new = USIZE_MSB | old;
-            if self.lock.compare_and_swap(old,
-                                          new,
-                                          Ordering::SeqCst) == old
-            {
+            if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
                 // Wait for readers to go away, then lock is ours.
                 while self.lock.load(Ordering::Relaxed) != USIZE_MSB {
                     cpu_relax();
                 }
-                break
+                break;
             }
         }
         RwLockWriteGuard {
@@ -296,12 +278,8 @@
     /// }
     /// ```
     #[inline]
-    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>>
-    {
-        if self.lock.compare_and_swap(0,
-                                      USIZE_MSB,
-                                      Ordering::SeqCst) == 0
-        {
+    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
+        if self.lock.compare_and_swap(0, USIZE_MSB, Ordering::SeqCst) == 0 {
             Some(RwLockWriteGuard {
                 lock: &self.lock,
                 data: unsafe { &mut *self.data.get() },
@@ -312,12 +290,9 @@
     }
 }
 
-impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T>
-{
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
-    {
-        match self.try_read()
-        {
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self.try_read() {
             Some(guard) => write!(f, "RwLock {{ data: ")
                 .and_then(|()| (&*guard).fmt(f))
                 .and_then(|()| write!(f, "}}")),
@@ -335,17 +310,23 @@ impl<T: ?Sized + Default> Default for RwLock<T> {
 
 impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
     type Target = T;
 
-    fn deref(&self) -> &T { self.data }
+    fn deref(&self) -> &T {
+        self.data
+    }
 }
 
 impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
     type Target = T;
 
-    fn deref(&self) -> &T { self.data }
+    fn deref(&self) -> &T {
+        self.data
+    }
 }
 
 impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
-    fn deref_mut(&mut self) -> &mut T { self.data }
+    fn deref_mut(&mut self) -> &mut T {
+        self.data
+    }
 }
 
 impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
@@ -366,9 +347,9 @@ impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
 mod tests {
     use std::prelude::v1::*;
 
-    use std::sync::Arc;
-    use std::sync::mpsc::channel;
     use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::mpsc::channel;
+    use std::sync::Arc;
     use std::thread;
 
     use super::*;
@@ -418,7 +399,7 @@
         let arc2 = arc.clone();
         let (tx, rx) = channel();
 
-        thread::spawn(move|| {
+        thread::spawn(move || {
             let mut lock = arc2.write();
             for _ in 0..10 {
                 let tmp = *lock;
@@ -433,7 +414,7 @@
         let mut children = Vec::new();
         for _ in 0..5 {
             let arc3 = arc.clone();
-            children.push(thread::spawn(move|| {
+            children.push(thread::spawn(move || {
                 let lock = arc3.read();
                 assert!(*lock >= 0);
             }));
@@ -454,7 +435,7 @@
     fn test_rw_arc_access_in_unwind() {
         let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
-        let _ = thread::spawn(move|| -> () {
+        let _ = thread::spawn(move || -> () {
             struct Unwinder {
                 i: Arc<RwLock<isize>>,
             }
@@ -466,7 +447,8 @@
             impl Drop for Unwinder {
                 fn drop(&mut self) {
                     let mut lock = self.i.write();
                     *lock += 1;
                 }
             }
             let _u = Unwinder { i: arc2 };
             panic!();
-        }).join();
+        })
+        .join();
         let lock = arc.read();
         assert_eq!(*lock, 2);
     }
@@ -493,7 +475,10 @@
         let write_result = lock.try_write();
         match write_result {
             None => (),
-            Some(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
+            Some(_) => assert!(
+                false,
+                "try_write should not succeed while read_guard is in scope"
+            ),
         }
 
         drop(read_guard);
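
The rw_lock reflow is the same story, but the diff does expose the locking scheme: the `usize` lock word keeps a writer flag in its most significant bit (`USIZE_MSB`) and a reader count in the remaining bits, which is why `read`/`try_read` add 1 to the masked word, `try_write` compare-and-swaps the whole word against 0, and `write` sets the MSB and then spins until the reader count drains. A short sketch of the resulting API, again under the assumed `qadapt_spin` crate name:

```rust
use qadapt_spin::RwLock;

static DATA: RwLock<u32> = RwLock::new(5);

fn main() {
    {
        // Multiple read guards may coexist; each bumps the reader count
        // held in the low bits of the lock word.
        let r1 = DATA.read();
        let r2 = DATA.read();
        assert_eq!(*r1 + *r2, 10);
        // A writer cannot enter while the reader count is nonzero.
        assert!(DATA.try_write().is_none());
    } // read guards dropped; reader count returns to 0

    {
        // The writer sets USIZE_MSB, waits for readers to drain, and then
        // has exclusive mutable access.
        let mut w = DATA.write();
        *w += 1;
    }
    assert_eq!(*DATA.read(), 6);
}
```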