mirror of https://github.com/bspeice/qadapt synced 2024-12-22 04:28:09 -05:00

Merge pull request #5 from bspeice/rustfmt

Re-enable rustfmt
bspeice 2018-12-17 04:48:04 +00:00 committed by GitHub
commit ab5b552175
5 changed files with 134 additions and 157 deletions

View File

@@ -40,12 +40,10 @@ before_install:
install:
- source ~/.cargo/env || true
# Disabling for the time being: rust-lang/rustup.rs#1558
# - rustup component add rustfmt
- rustup component add rustfmt
script:
# Disabling for the time being: rust-lang/rustup.rs#1558
# - cargo fmt --all -- --check
- cargo fmt --all -- --check
- |
if [ -z "$TRAVIS_TAG" ]; then
cargo test
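
The net effect of the CI hunk above is to drop the commented-out workaround for rust-lang/rustup.rs#1558 and restore the real commands, so CI installs the rustfmt component again and cargo fmt --all -- --check fails the build whenever any file in the workspace deviates from rustfmt's output. Assuming a standard rustup-managed toolchain, running those same two commands locally reproduces the check before pushing.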

View File

@@ -10,9 +10,9 @@
extern crate std;
pub use mutex::*;
pub use rw_lock::*;
pub use once::*;
pub use rw_lock::*;
mod mutex;
mod rw_lock;
mod once;
mod rw_lock;

View File

@@ -1,10 +1,10 @@
use core::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, spin_loop_hint as cpu_relax};
use core::cell::UnsafeCell;
use core::marker::Sync;
use core::ops::{Drop, Deref, DerefMut};
use core::fmt;
use core::option::Option::{self, None, Some};
use core::default::Default;
use core::fmt;
use core::marker::Sync;
use core::ops::{Deref, DerefMut, Drop};
use core::option::Option::{self, None, Some};
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicBool, Ordering, ATOMIC_BOOL_INIT};
/// This type provides MUTual EXclusion based on spinning.
///
@@ -69,8 +69,7 @@ use core::default::Default;
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, numthreads);
/// ```
pub struct Mutex<T: ?Sized>
{
pub struct Mutex<T: ?Sized> {
lock: AtomicBool,
data: UnsafeCell<T>,
}
@@ -79,8 +78,7 @@ pub struct Mutex<T: ?Sized>
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct MutexGuard<'a, T: ?Sized + 'a>
{
pub struct MutexGuard<'a, T: ?Sized + 'a> {
lock: &'a AtomicBool,
data: &'a mut T,
}
@@ -89,8 +87,7 @@ pub struct MutexGuard<'a, T: ?Sized + 'a>
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
impl<T> Mutex<T>
{
impl<T> Mutex<T> {
/// Creates a new spinlock wrapping the supplied data.
///
/// May be used statically:
@@ -106,10 +103,8 @@ impl<T> Mutex<T>
/// drop(lock);
/// }
/// ```
pub const fn new(user_data: T) -> Mutex<T>
{
Mutex
{
pub const fn new(user_data: T) -> Mutex<T> {
Mutex {
lock: ATOMIC_BOOL_INIT,
data: UnsafeCell::new(user_data),
}
@@ -124,15 +119,11 @@ impl<T> Mutex<T>
}
}
impl<T: ?Sized> Mutex<T>
{
fn obtain_lock(&self)
{
while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false
{
impl<T: ?Sized> Mutex<T> {
fn obtain_lock(&self) {
while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
// Wait until the lock looks unlocked before retrying
while self.lock.load(Ordering::Relaxed)
{
while self.lock.load(Ordering::Relaxed) {
cpu_relax();
}
}
@@ -153,11 +144,9 @@ impl<T: ?Sized> Mutex<T>
/// }
///
/// ```
pub fn lock(&self) -> MutexGuard<T>
{
pub fn lock(&self) -> MutexGuard<T> {
self.obtain_lock();
MutexGuard
{
MutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
@@ -176,33 +165,24 @@ impl<T: ?Sized> Mutex<T>
/// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
/// a guard within Some.
pub fn try_lock(&self) -> Option<MutexGuard<T>>
{
if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
{
Some(
MutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
)
}
else
{
pub fn try_lock(&self) -> Option<MutexGuard<T>> {
if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
Some(MutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
})
} else {
None
}
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
match self.try_lock()
{
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => write!(f, "Mutex {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Mutex {{ <locked> }}"),
}
}
@@ -214,22 +194,22 @@ impl<T: ?Sized + Default> Default for Mutex<T> {
}
}
impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>
{
impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
type Target = T;
fn deref<'b>(&'b self) -> &'b T { &*self.data }
fn deref<'b>(&'b self) -> &'b T {
&*self.data
}
}
impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>
{
fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data }
impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
fn deref_mut<'b>(&'b mut self) -> &'b mut T {
&mut *self.data
}
}
impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
{
impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
/// The dropping of the MutexGuard will release the lock it was created from.
fn drop(&mut self)
{
fn drop(&mut self) {
self.lock.store(false, Ordering::Release);
}
}
@@ -238,9 +218,9 @@ impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
mod tests {
use std::prelude::v1::*;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use super::*;
@@ -257,7 +237,7 @@ mod tests {
#[test]
fn lots_and_lots() {
static M: Mutex<()> = Mutex::new(());
static M: Mutex<()> = Mutex::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
@@ -274,16 +254,22 @@ mod tests {
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
thread::spawn(move || {
inc();
tx2.send(()).unwrap();
});
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe {CNT}, J * K * 2);
assert_eq!(unsafe { CNT }, J * K * 2);
}
#[test]
@@ -335,7 +321,7 @@ mod tests {
let arc = Arc::new(Mutex::new(1));
let arc2 = Arc::new(Mutex::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
let _t = thread::spawn(move || {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
@@ -348,7 +334,7 @@ mod tests {
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(Mutex::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<Mutex<i32>>,
}
@@ -359,7 +345,8 @@ mod tests {
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
})
.join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
@@ -382,7 +369,7 @@ mod tests {
::std::mem::forget(lock.lock());
unsafe {
lock.force_unlock();
}
}
assert!(lock.try_lock().is_some());
}
}
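
The locking strategy reformatted above (obtain_lock) is a test-and-test-and-set spinlock: compare_and_swap tries to flip the flag from false to true, and on failure the thread spins on a cheap Relaxed load until the flag looks clear before retrying, with cpu_relax (spin_loop_hint) easing the busy-wait. The following is a minimal standalone sketch of the same idea, not the crate's code: the SpinFlag name is illustrative, and it substitutes the newer compare_exchange and std::hint::spin_loop for the deprecated compare_and_swap and spin_loop_hint seen in the diff.

use std::sync::atomic::{AtomicBool, Ordering};

pub struct SpinFlag {
    locked: AtomicBool,
}

impl SpinFlag {
    pub const fn new() -> Self {
        SpinFlag { locked: AtomicBool::new(false) }
    }

    pub fn lock(&self) {
        // Try to flip false -> true with Acquire ordering; while another
        // thread holds the lock, spin on a Relaxed load until it looks
        // free, then retry the exchange.
        while self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            while self.locked.load(Ordering::Relaxed) {
                std::hint::spin_loop();
            }
        }
    }

    pub fn unlock(&self) {
        // Release pairs with the Acquire in lock(), publishing protected writes.
        self.locked.store(false, Ordering::Release);
    }
}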

View File

@@ -1,6 +1,6 @@
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint as cpu_relax};
use core::fmt;
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Unlike its std equivalent, this is generalized so that The
@@ -27,9 +27,9 @@ impl<T: fmt::Debug> fmt::Debug for Once<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try() {
Some(s) => write!(f, "Once {{ data: ")
.and_then(|()| s.fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Once {{ <uninitialized> }}")
.and_then(|()| s.fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Once {{ <uninitialized> }}"),
}
}
}
@@ -62,7 +62,7 @@ impl<T> Once<T> {
fn force_get<'a>(&'a self) -> &'a T {
match unsafe { &*self.data.get() }.as_ref() {
None => unsafe { unreachable() },
None => unsafe { unreachable() },
Some(p) => p,
}
}
@@ -96,17 +96,22 @@ impl<T> Once<T> {
/// }
/// ```
pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T
where F: FnOnce() -> T
where
F: FnOnce() -> T,
{
let mut status = self.state.load(Ordering::SeqCst);
if status == INCOMPLETE {
status = self.state.compare_and_swap(INCOMPLETE,
RUNNING,
Ordering::SeqCst);
if status == INCOMPLETE { // We init
status = self
.state
.compare_and_swap(INCOMPLETE, RUNNING, Ordering::SeqCst);
if status == INCOMPLETE {
// We init
// We use a guard (Finish) to catch panics caused by builder
let mut finish = Finish { state: &self.state, panicked: true };
let mut finish = Finish {
state: &self.state,
panicked: true,
};
unsafe { *self.data.get() = Some(builder()) };
finish.panicked = false;
@@ -121,10 +126,11 @@ impl<T> Once<T> {
loop {
match status {
INCOMPLETE => unreachable!(),
RUNNING => { // We spin
RUNNING => {
// We spin
cpu_relax();
status = self.state.load(Ordering::SeqCst)
},
}
PANICKED => panic!("Once has panicked"),
COMPLETE => return self.force_get(),
_ => unsafe { unreachable() },
@@ -136,7 +142,7 @@ impl<T> Once<T> {
pub fn try<'a>(&'a self) -> Option<&'a T> {
match self.state.load(Ordering::SeqCst) {
COMPLETE => Some(self.force_get()),
_ => None,
_ => None,
}
}
@@ -146,9 +152,9 @@ impl<T> Once<T> {
loop {
match self.state.load(Ordering::SeqCst) {
INCOMPLETE => return None,
RUNNING => cpu_relax(), // We spin
COMPLETE => return Some(self.force_get()),
PANICKED => panic!("Once has panicked"),
RUNNING => cpu_relax(), // We spin
COMPLETE => return Some(self.force_get()),
PANICKED => panic!("Once has panicked"),
_ => unsafe { unreachable() },
}
}
@@ -172,9 +178,9 @@ impl<'a> Drop for Finish<'a> {
mod tests {
use std::prelude::v1::*;
use super::Once;
use std::sync::mpsc::channel;
use std::thread;
use super::Once;
#[test]
fn smoke_once() {
@@ -203,8 +209,10 @@ mod tests {
let (tx, rx) = channel();
for _ in 0..10 {
let tx = tx.clone();
thread::spawn(move|| {
for _ in 0..4 { thread::yield_now() }
thread::spawn(move || {
for _ in 0..4 {
thread::yield_now()
}
unsafe {
O.call_once(|| {
assert!(!RUN);
@@ -243,13 +251,12 @@ mod tests {
static INIT: Once<usize> = Once::new();
assert!(INIT.try().is_none());
thread::spawn(move|| {
INIT.call_once(|| loop { });
thread::spawn(move || {
INIT.call_once(|| loop {});
});
assert!(INIT.try().is_none());
}
#[test]
fn wait() {
static INIT: Once<usize> = Once::new();
@@ -261,7 +268,7 @@ mod tests {
#[test]
fn panic() {
use ::std::panic;
use std::panic;
static INIT: Once<()> = Once::new();
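
The call_once path reformatted above drives a four-state machine kept in an AtomicUsize: the first caller CASes INCOMPLETE to RUNNING, runs the builder (with the Finish guard marking the state PANICKED if the builder unwinds), then publishes COMPLETE, while every other caller spins until it observes COMPLETE. Below is a rough sketch of that flow, not the crate's code: the OnceSketch name and constant values are illustrative, compare_exchange stands in for the deprecated compare_and_swap, and the PANICKED/unwind path is omitted for brevity.

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};

const INCOMPLETE: usize = 0;
const RUNNING: usize = 1;
const COMPLETE: usize = 2;

pub struct OnceSketch<T> {
    state: AtomicUsize,
    data: UnsafeCell<Option<T>>,
}

// Safe to share: the UnsafeCell is only written by the single initializing thread.
unsafe impl<T: Send + Sync> Sync for OnceSketch<T> {}

impl<T> OnceSketch<T> {
    pub const fn new() -> Self {
        OnceSketch {
            state: AtomicUsize::new(INCOMPLETE),
            data: UnsafeCell::new(None),
        }
    }

    pub fn call_once<F: FnOnce() -> T>(&self, builder: F) -> &T {
        // Only the caller that wins the CAS runs the builder.
        if self
            .state
            .compare_exchange(INCOMPLETE, RUNNING, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
        {
            unsafe { *self.data.get() = Some(builder()) };
            self.state.store(COMPLETE, Ordering::SeqCst);
        } else {
            // Everyone else spins until the value has been published.
            while self.state.load(Ordering::SeqCst) != COMPLETE {
                std::hint::spin_loop();
            }
        }
        unsafe { (*self.data.get()).as_ref().unwrap() }
    }
}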

View File

@@ -1,8 +1,8 @@
use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT, spin_loop_hint as cpu_relax};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::fmt;
use core::default::Default;
use core::fmt;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
/// A reader-writer lock
///
@@ -42,8 +42,7 @@ use core::default::Default;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized>
{
pub struct RwLock<T: ?Sized> {
lock: AtomicUsize,
data: UnsafeCell<T>,
}
@@ -53,8 +52,7 @@ pub struct RwLock<T: ?Sized>
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
{
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
lock: &'a AtomicUsize,
data: &'a T,
}
@@ -63,8 +61,7 @@ pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized>
{
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
lock: &'a AtomicUsize,
data: &'a mut T,
}
@@ -75,8 +72,7 @@ unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
const USIZE_MSB: usize = ::core::isize::MIN as usize;
impl<T> RwLock<T>
{
impl<T> RwLock<T> {
/// Creates a new spinlock wrapping the supplied data.
///
/// May be used statically:
@@ -93,18 +89,15 @@ impl<T> RwLock<T>
/// }
/// ```
#[inline]
pub const fn new(user_data: T) -> RwLock<T>
{
RwLock
{
pub const fn new(user_data: T) -> RwLock<T> {
RwLock {
lock: ATOMIC_USIZE_INIT,
data: UnsafeCell::new(user_data),
}
}
/// Consumes this `RwLock`, returning the underlying data.
pub fn into_inner(self) -> T
{
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let RwLock { data, .. } = self;
@@ -112,8 +105,7 @@ impl<T> RwLock<T>
}
}
impl<T: ?Sized> RwLock<T>
{
impl<T: ?Sized> RwLock<T> {
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
@@ -136,8 +128,7 @@ impl<T: ?Sized> RwLock<T>
/// }
/// ```
#[inline]
pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T>
{
pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T> {
// (funny do-while loop)
while {
// Old value, with write bit unset
@@ -164,7 +155,7 @@ impl<T: ?Sized> RwLock<T>
}
RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
data: unsafe { &*self.data.get() },
}
}
@@ -191,20 +182,16 @@ impl<T: ?Sized> RwLock<T>
/// }
/// ```
#[inline]
pub fn try_read(&self) -> Option<RwLockReadGuard<T>>
{
pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
// Old value, with write bit unset
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
let new = old + 1;
debug_assert!(new != (!USIZE_MSB) & (!0));
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
Some(RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
data: unsafe { &*self.data.get() },
})
} else {
None
@@ -251,23 +238,18 @@ impl<T: ?Sized> RwLock<T>
/// }
/// ```
#[inline]
pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T>
{
loop
{
pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T> {
loop {
// Old value, with write bit unset.
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
// Old value, with write bit set.
let new = USIZE_MSB | old;
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
// Wait for readers to go away, then lock is ours.
while self.lock.load(Ordering::Relaxed) != USIZE_MSB {
cpu_relax();
}
break
break;
}
}
RwLockWriteGuard {
@@ -296,12 +278,8 @@ impl<T: ?Sized> RwLock<T>
/// }
/// ```
#[inline]
pub fn try_write(&self) -> Option<RwLockWriteGuard<T>>
{
if self.lock.compare_and_swap(0,
USIZE_MSB,
Ordering::SeqCst) == 0
{
pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
if self.lock.compare_and_swap(0, USIZE_MSB, Ordering::SeqCst) == 0 {
Some(RwLockWriteGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
@@ -312,12 +290,9 @@ impl<T: ?Sized> RwLock<T>
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
match self.try_read()
{
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_read() {
Some(guard) => write!(f, "RwLock {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
@@ -335,17 +310,23 @@ impl<T: ?Sized + Default> Default for RwLock<T> {
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
fn deref(&self) -> &T {
self.data
}
}
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
fn deref(&self) -> &T {
self.data
}
}
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
fn deref_mut(&mut self) -> &mut T { self.data }
fn deref_mut(&mut self) -> &mut T {
self.data
}
}
impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
@@ -366,9 +347,9 @@ impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
mod tests {
use std::prelude::v1::*;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use super::*;
@@ -418,7 +399,7 @@ mod tests {
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move|| {
thread::spawn(move || {
let mut lock = arc2.write();
for _ in 0..10 {
let tmp = *lock;
@@ -433,7 +414,7 @@ mod tests {
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move|| {
children.push(thread::spawn(move || {
let lock = arc3.read();
assert!(*lock >= 0);
}));
@@ -454,7 +435,7 @@ mod tests {
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
let _ = thread::spawn(move || -> () {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
@@ -466,7 +447,8 @@ mod tests {
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
})
.join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
@@ -493,7 +475,10 @@ mod tests {
let write_result = lock.try_write();
match write_result {
None => (),
Some(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
Some(_) => assert!(
false,
"try_write should not succeed while read_guard is in scope"
),
}
drop(read_guard);