mirror of https://github.com/bspeice/qadapt synced 2024-11-24 06:48:09 -05:00

Merge pull request #3 from bspeice/qadapt-spin

Vendor in the spin crate for the time being
bspeice 2018-12-15 21:00:31 +00:00 committed by GitHub
commit 501c7712fc
12 changed files with 1331 additions and 2 deletions


@@ -20,10 +20,10 @@ maintenance = { status = "actively-developed" }
[dependencies]
libc = "0.2"
spin = { git = "https://github.com/bspeice/spin-rs.git" }
thread-id = "3.3"
qadapt-macro = { version = "0.7.1", path = "./qadapt-macro" }
qadapt-spin = { version = "0.7.1", path = "./qadapt-spin" }
[dev-dependencies]
futures = "0.1"

qadapt-spin/.gitignore (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
/target
**/*.rs.bk
Cargo.lock

qadapt-spin/Cargo.toml (new file, 15 lines)

@@ -0,0 +1,15 @@
[package]
name = "qadapt-spin"
version = "0.7.1"
authors = [ "Mathijs van de Nes <git@mathijs.vd-nes.nl>",
"John Ericson <John_Ericson@Yahoo.com>" ]
license = "MIT"
repository = "https://github.com/mvdnes/spin-rs.git"
documentation = "https://mvdnes.github.io/rust-docs/spin-rs/spin/index.html"
keywords = ["spinlock", "mutex", "rwlock"]
description = """
Synchronization primitives based on spinning.
They may contain data, are usable without `std`,
and static initializers are available.
"""

qadapt-spin/LICENSE (new file, 21 lines)

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Mathijs van de Nes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

qadapt-spin/README.md (new file, 4 lines)

@@ -0,0 +1,4 @@
qadapt-spin
===========
Placeholder for the [`spin-rs`](https://github.com/mvdnes/spin-rs) crate, vendored into qadapt for the time being.


@@ -0,0 +1,21 @@
extern crate spin;
fn main() {
let mutex = spin::Mutex::new(42);
println!("{:?}", mutex);
{
let x = mutex.lock();
println!("{:?}, {:?}", mutex, *x);
}
let rwlock = spin::RwLock::new(42);
println!("{:?}", rwlock);
{
let x = rwlock.read();
println!("{:?}, {:?}", rwlock, *x);
}
{
let x = rwlock.write();
println!("{:?}, {:?}", rwlock, *x);
}
}


@@ -0,0 +1,3 @@
PROJECT_NAME=spin-rs
DOCS_REPO=mvdnes/rust-docs.git
DOC_RUST_VERSION=nightly

qadapt-spin/src/lib.rs (new file, 18 lines)

@@ -0,0 +1,18 @@
#![crate_type = "lib"]
#![warn(missing_docs)]
//! Synchronization primitives based on spinning
#![no_std]
#[cfg(test)]
#[macro_use]
extern crate std;
pub use mutex::*;
pub use rw_lock::*;
pub use once::*;
mod mutex;
mod rw_lock;
mod once;
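
Note: because `lib.rs` re-exports `mutex`, `rw_lock`, and `once` at the crate root (the modules themselves stay private), callers name the primitives directly on the crate. A minimal sketch, assuming the crate is consumed under the `qadapt-spin` name declared in the Cargo.toml above; the usage itself is illustrative:

extern crate qadapt_spin;

// The `pub use` re-exports expose the primitives at the crate root, so the
// private module names never appear in caller code.
use qadapt_spin::{Mutex, Once};

static INIT: Once<u32> = Once::new();

fn main() {
    let m = Mutex::new(1u32);
    *m.lock() += 1;                        // guard dropped at end of statement
    let v = INIT.call_once(|| *m.lock());  // runs the closure exactly once
    assert_eq!(*v, 2);
}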

qadapt-spin/src/mutex.rs (new file, 388 lines)

@@ -0,0 +1,388 @@
use core::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT, spin_loop_hint as cpu_relax};
use core::cell::UnsafeCell;
use core::marker::Sync;
use core::ops::{Drop, Deref, DerefMut};
use core::fmt;
use core::option::Option::{self, None, Some};
use core::default::Default;
/// This type provides MUTual EXclusion based on spinning.
///
/// # Description
///
/// The behaviour of these locks is similar to their namesakes in `std::sync`. They
/// differ in the following ways:
///
/// - The lock will not be poisoned in case of failure;
///
/// # Simple examples
///
/// ```
/// use spin;
/// let spin_mutex = spin::Mutex::new(0);
///
/// // Modify the data
/// {
/// let mut data = spin_mutex.lock();
/// *data = 2;
/// }
///
/// // Read the data
/// let answer =
/// {
/// let data = spin_mutex.lock();
/// *data
/// };
///
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread-safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let numthreads = 1000;
/// let spin_mutex = Arc::new(spin::Mutex::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(numthreads + 1));
///
/// for _ in (0..numthreads)
/// {
/// let my_barrier = barrier.clone();
/// let my_lock = spin_mutex.clone();
/// std::thread::spawn(move||
/// {
/// let mut guard = my_lock.lock();
/// *guard += 1;
///
/// // Release the lock to prevent a deadlock
/// drop(guard);
/// my_barrier.wait();
/// });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, numthreads);
/// ```
pub struct Mutex<T: ?Sized>
{
lock: AtomicBool,
data: UnsafeCell<T>,
}
/// A guard through which the protected data can be accessed
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct MutexGuard<'a, T: ?Sized + 'a>
{
lock: &'a AtomicBool,
data: &'a mut T,
}
// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
impl<T> Mutex<T>
{
/// Creates a new spinlock wrapping the supplied data.
///
/// May be used statically:
///
/// ```
/// use spin;
///
/// static MUTEX: spin::Mutex<()> = spin::Mutex::new(());
///
/// fn demo() {
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
/// }
/// ```
pub const fn new(user_data: T) -> Mutex<T>
{
Mutex
{
lock: ATOMIC_BOOL_INIT,
data: UnsafeCell::new(user_data),
}
}
/// Consumes this mutex, returning the underlying data.
pub fn into_inner(self) -> T {
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let Mutex { data, .. } = self;
data.into_inner()
}
}
impl<T: ?Sized> Mutex<T>
{
fn obtain_lock(&self)
{
while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false
{
// Wait until the lock looks unlocked before retrying
while self.lock.load(Ordering::Relaxed)
{
cpu_relax();
}
}
}
/// Locks the spinlock and returns a guard.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
///
/// ```
/// let mylock = spin::Mutex::new(0);
/// {
/// let mut data = mylock.lock();
/// // The lock is now locked and the data can be accessed
/// *data += 1;
/// // The lock is implicitly dropped
/// }
///
/// ```
pub fn lock(&self) -> MutexGuard<T>
{
self.obtain_lock();
MutexGuard
{
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
/// Force unlock the spinlock.
///
/// This is *extremely* unsafe if the lock is not held by the current
/// thread. However, this can be useful in some instances for exposing the
/// lock to FFI that doesn't know how to deal with RAII.
///
/// If the lock isn't held, this is a no-op.
pub unsafe fn force_unlock(&self) {
self.lock.store(false, Ordering::Release);
}
/// Tries to lock the mutex. If it is already locked, it will return None. Otherwise it returns
/// a guard within Some.
pub fn try_lock(&self) -> Option<MutexGuard<T>>
{
if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false
{
Some(
MutexGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
)
}
else
{
None
}
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
match self.try_lock()
{
Some(guard) => write!(f, "Mutex {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Mutex {{ <locked> }}"),
}
}
}
impl<T: ?Sized + Default> Default for Mutex<T> {
fn default() -> Mutex<T> {
Mutex::new(Default::default())
}
}
impl<'a, T: ?Sized> Deref for MutexGuard<'a, T>
{
type Target = T;
fn deref<'b>(&'b self) -> &'b T { &*self.data }
}
impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T>
{
fn deref_mut<'b>(&'b mut self) -> &'b mut T { &mut *self.data }
}
impl<'a, T: ?Sized> Drop for MutexGuard<'a, T>
{
/// The dropping of the MutexGuard will release the lock it was created from.
fn drop(&mut self)
{
self.lock.store(false, Ordering::Release);
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use super::*;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let m = Mutex::new(());
drop(m.lock());
drop(m.lock());
}
#[test]
fn lots_and_lots() {
static M: Mutex<()> = Mutex::new(());
static mut CNT: u32 = 0;
const J: u32 = 1000;
const K: u32 = 3;
fn inc() {
for _ in 0..J {
unsafe {
let _g = M.lock();
CNT += 1;
}
}
}
let (tx, rx) = channel();
for _ in 0..K {
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
let tx2 = tx.clone();
thread::spawn(move|| { inc(); tx2.send(()).unwrap(); });
}
drop(tx);
for _ in 0..2 * K {
rx.recv().unwrap();
}
assert_eq!(unsafe {CNT}, J * K * 2);
}
#[test]
fn try_lock() {
let mutex = Mutex::new(42);
// First lock succeeds
let a = mutex.try_lock();
assert_eq!(a.as_ref().map(|r| **r), Some(42));
// Additional lock fails
let b = mutex.try_lock();
assert!(b.is_none());
// After dropping lock, it succeeds again
::core::mem::drop(a);
let c = mutex.try_lock();
assert_eq!(c.as_ref().map(|r| **r), Some(42));
}
#[test]
fn test_into_inner() {
let m = Mutex::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = Mutex::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_mutex_arc_nested() {
// Tests nested mutexes and access
// to underlying data.
let arc = Arc::new(Mutex::new(1));
let arc2 = Arc::new(Mutex::new(arc));
let (tx, rx) = channel();
let _t = thread::spawn(move|| {
let lock = arc2.lock();
let lock2 = lock.lock();
assert_eq!(*lock2, 1);
tx.send(()).unwrap();
});
rx.recv().unwrap();
}
#[test]
fn test_mutex_arc_access_in_unwind() {
let arc = Arc::new(Mutex::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
struct Unwinder {
i: Arc<Mutex<i32>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
*self.i.lock() += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
#[test]
fn test_mutex_unsized() {
let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
{
let b = &mut *mutex.lock();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*mutex.lock(), comp);
}
#[test]
fn test_mutex_force_lock() {
let lock = Mutex::new(());
::std::mem::forget(lock.lock());
unsafe {
lock.force_unlock();
}
assert!(lock.try_lock().is_some());
}
}
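
Note: `try_lock` above has no doc example of its own; the sketch below illustrates the non-blocking behaviour it promises (it returns a guard inside `Some` only when the lock is free, otherwise `None`). The crate name is assumed from this diff and the example is illustrative:

extern crate qadapt_spin;

use qadapt_spin::Mutex;

fn main() {
    let m = Mutex::new(0u32);
    // The first try_lock succeeds because nobody holds the lock.
    let guard = m.try_lock().expect("lock was free");
    // While that guard is alive, try_lock returns None instead of spinning.
    assert!(m.try_lock().is_none());
    drop(guard);
    // Once the guard is dropped the lock can be taken again.
    assert!(m.try_lock().is_some());
}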

qadapt-spin/src/once.rs (new file, 290 lines)

@@ -0,0 +1,290 @@
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicUsize, Ordering, spin_loop_hint as cpu_relax};
use core::fmt;
/// A synchronization primitive which can be used to run a one-time global
/// initialization. Unlike its std equivalent, this is generalized so that the
/// closure returns a value and it is stored. `Once` therefore acts something
/// like a future, too.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static START: spin::Once<()> = spin::Once::new();
///
/// START.call_once(|| {
/// // run initialization here
/// });
/// ```
pub struct Once<T> {
state: AtomicUsize,
data: UnsafeCell<Option<T>>, // TODO remove option and use mem::uninitialized
}
impl<T: fmt::Debug> fmt::Debug for Once<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try() {
Some(s) => write!(f, "Once {{ data: ")
.and_then(|()| s.fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "Once {{ <uninitialized> }}")
}
}
}
// Same unsafe impls as `std::sync::RwLock`, because this also allows for
// concurrent reads.
unsafe impl<T: Send + Sync> Sync for Once<T> {}
unsafe impl<T: Send> Send for Once<T> {}
// Four states that a Once can be in, encoded into the lower bits of `state` in
// the Once structure.
const INCOMPLETE: usize = 0x0;
const RUNNING: usize = 0x1;
const COMPLETE: usize = 0x2;
const PANICKED: usize = 0x3;
use core::hint::unreachable_unchecked as unreachable;
impl<T> Once<T> {
/// Initialization constant of `Once`.
pub const INIT: Self = Once {
state: AtomicUsize::new(INCOMPLETE),
data: UnsafeCell::new(None),
};
/// Creates a new `Once` value.
pub const fn new() -> Once<T> {
Self::INIT
}
fn force_get<'a>(&'a self) -> &'a T {
match unsafe { &*self.data.get() }.as_ref() {
None => unsafe { unreachable() },
Some(p) => p,
}
}
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
/// and otherwise the routine will *not* be invoked.
///
/// This method will block the calling thread if another initialization
/// routine is currently running.
///
/// When this function returns, it is guaranteed that some initialization
/// has run and completed (it may not be the closure specified). The
/// returned pointer will point to the result from the closure that was
/// run.
///
/// # Examples
///
/// ```
/// use spin;
///
/// static INIT: spin::Once<usize> = spin::Once::new();
///
/// fn get_cached_val() -> usize {
/// *INIT.call_once(expensive_computation)
/// }
///
/// fn expensive_computation() -> usize {
/// // ...
/// # 2
/// }
/// ```
pub fn call_once<'a, F>(&'a self, builder: F) -> &'a T
where F: FnOnce() -> T
{
let mut status = self.state.load(Ordering::SeqCst);
if status == INCOMPLETE {
status = self.state.compare_and_swap(INCOMPLETE,
RUNNING,
Ordering::SeqCst);
if status == INCOMPLETE { // We init
// We use a guard (Finish) to catch panics caused by builder
let mut finish = Finish { state: &self.state, panicked: true };
unsafe { *self.data.get() = Some(builder()) };
finish.panicked = false;
status = COMPLETE;
self.state.store(status, Ordering::SeqCst);
// This next line is strictly an optimization
return self.force_get();
}
}
loop {
match status {
INCOMPLETE => unreachable!(),
RUNNING => { // We spin
cpu_relax();
status = self.state.load(Ordering::SeqCst)
},
PANICKED => panic!("Once has panicked"),
COMPLETE => return self.force_get(),
_ => unsafe { unreachable() },
}
}
}
/// Returns a pointer iff the `Once` was previously initialized
pub fn try<'a>(&'a self) -> Option<&'a T> {
match self.state.load(Ordering::SeqCst) {
COMPLETE => Some(self.force_get()),
_ => None,
}
}
/// Like try, but will spin if the `Once` is in the process of being
/// initialized
pub fn wait<'a>(&'a self) -> Option<&'a T> {
loop {
match self.state.load(Ordering::SeqCst) {
INCOMPLETE => return None,
RUNNING => cpu_relax(), // We spin
COMPLETE => return Some(self.force_get()),
PANICKED => panic!("Once has panicked"),
_ => unsafe { unreachable() },
}
}
}
}
struct Finish<'a> {
state: &'a AtomicUsize,
panicked: bool,
}
impl<'a> Drop for Finish<'a> {
fn drop(&mut self) {
if self.panicked {
self.state.store(PANICKED, Ordering::SeqCst);
}
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::mpsc::channel;
use std::thread;
use super::Once;
#[test]
fn smoke_once() {
static O: Once<()> = Once::new();
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}
#[test]
fn smoke_once_value() {
static O: Once<usize> = Once::new();
let a = O.call_once(|| 1);
assert_eq!(*a, 1);
let b = O.call_once(|| 2);
assert_eq!(*b, 1);
}
#[test]
fn stampede_once() {
static O: Once<()> = Once::new();
static mut RUN: bool = false;
let (tx, rx) = channel();
for _ in 0..10 {
let tx = tx.clone();
thread::spawn(move|| {
for _ in 0..4 { thread::yield_now() }
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
tx.send(()).unwrap();
});
}
unsafe {
O.call_once(|| {
assert!(!RUN);
RUN = true;
});
assert!(RUN);
}
for _ in 0..10 {
rx.recv().unwrap();
}
}
#[test]
fn try() {
static INIT: Once<usize> = Once::new();
assert!(INIT.try().is_none());
INIT.call_once(|| 2);
assert_eq!(INIT.try().map(|r| *r), Some(2));
}
#[test]
fn try_no_wait() {
static INIT: Once<usize> = Once::new();
assert!(INIT.try().is_none());
thread::spawn(move|| {
INIT.call_once(|| loop { });
});
assert!(INIT.try().is_none());
}
#[test]
fn wait() {
static INIT: Once<usize> = Once::new();
assert!(INIT.wait().is_none());
INIT.call_once(|| 3);
assert_eq!(INIT.wait().map(|r| *r), Some(3));
}
#[test]
fn panic() {
use ::std::panic;
static INIT: Once<()> = Once::new();
// poison the once
let t = panic::catch_unwind(|| {
INIT.call_once(|| panic!());
});
assert!(t.is_err());
// poisoning propagates
let t = panic::catch_unwind(|| {
INIT.call_once(|| {});
});
assert!(t.is_err());
}
#[test]
fn init_constant() {
static O: Once<()> = Once::INIT;
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
O.call_once(|| a += 1);
assert_eq!(a, 1);
}
}
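
Note: `try` and `wait` above never run the initializer themselves; they only report whether a `call_once` has completed. A minimal illustrative sketch, with the crate name assumed from this diff:

extern crate qadapt_spin;

use qadapt_spin::Once;

static CONFIG: Once<u32> = Once::new();

fn main() {
    // Nothing has initialized the Once yet, so `try` reports None.
    assert!(CONFIG.try().is_none());
    // call_once runs the closure exactly once and hands back a reference.
    assert_eq!(*CONFIG.call_once(|| 7), 7);
    // Afterwards try (and wait) observe the stored value without running
    // any further closure.
    assert_eq!(CONFIG.try().map(|r| *r), Some(7));
    assert_eq!(CONFIG.wait().map(|r| *r), Some(7));
}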

qadapt-spin/src/rw_lock.rs (new file, 554 lines)

@@ -0,0 +1,554 @@
use core::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT, spin_loop_hint as cpu_relax};
use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::fmt;
use core::default::Default;
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// Based on
/// <https://jfdube.wordpress.com/2014/01/03/implementing-a-recursive-read-write-spinlock/>
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
/// let r1 = lock.read();
/// let r2 = lock.read();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
/// let mut w = lock.write();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized>
{
lock: AtomicUsize,
data: UnsafeCell<T>,
}
/// A guard through which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized>
{
lock: &'a AtomicUsize,
data: &'a T,
}
/// A guard through which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized>
{
lock: &'a AtomicUsize,
data: &'a mut T,
}
// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
const USIZE_MSB: usize = ::core::isize::MIN as usize;
impl<T> RwLock<T>
{
/// Creates a new spinlock wrapping the supplied data.
///
/// May be used statically:
///
/// ```
/// use spin;
///
/// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
///
/// fn demo() {
/// let lock = RW_LOCK.read();
/// // do something with lock
/// drop(lock);
/// }
/// ```
#[inline]
pub const fn new(user_data: T) -> RwLock<T>
{
RwLock
{
lock: ATOMIC_USIZE_INIT,
data: UnsafeCell::new(user_data),
}
}
/// Consumes this `RwLock`, returning the underlying data.
pub fn into_inner(self) -> T
{
// We know statically that there are no outstanding references to
// `self` so there's no need to lock.
let RwLock { data, .. } = self;
data.into_inner()
}
}
impl<T: ?Sized> RwLock<T>
{
/// Locks this rwlock with shared read access, blocking the current thread
/// until it can be acquired.
///
/// The calling thread will be blocked until there are no more writers which
/// hold the lock. There may be other readers currently inside the lock when
/// this method returns. This method does not provide any guarantees with
/// respect to the ordering of whether contentious readers or writers will
/// acquire the lock first.
///
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// let mut data = mylock.read();
/// // The lock is now locked and the data can be read
/// println!("{}", *data);
/// // The lock is dropped
/// }
/// ```
#[inline]
pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T>
{
// (funny do-while loop)
while {
// Old value, with write bit unset
let mut old;
// Wait for the writer to go away before doing expensive atomic ops
// (funny do-while loop)
while {
old = self.lock.load(Ordering::Relaxed);
old & USIZE_MSB != 0
} {
cpu_relax();
}
// unset write bit
old &= !USIZE_MSB;
let new = old + 1;
debug_assert!(new != (!USIZE_MSB) & (!0));
self.lock.compare_and_swap(old, new, Ordering::SeqCst) != old
} {
cpu_relax();
}
RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
}
}
/// Attempt to acquire this lock with shared read access.
///
/// This function will never block and will return immediately if `read`
/// would otherwise succeed. Returns `Some` of an RAII guard which will
/// release the shared access of this thread when dropped, or `None` if the
/// access could not be granted. This method does not provide any
/// guarantees with respect to the ordering of whether contentious readers
/// or writers will acquire the lock first.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// match mylock.try_read() {
/// Some(data) => {
/// // The lock is now locked and the data can be read
/// println!("{}", *data);
/// // The lock is dropped
/// },
/// None => (), // no cigar
/// };
/// }
/// ```
#[inline]
pub fn try_read(&self) -> Option<RwLockReadGuard<T>>
{
// Old value, with write bit unset
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
let new = old + 1;
debug_assert!(new != (!USIZE_MSB) & (!0));
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
Some(RwLockReadGuard {
lock: &self.lock,
data: unsafe { & *self.data.get() },
})
} else {
None
}
}
/// Force decrement the reader count.
///
/// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
/// live, or if called more times than `read` has been called, but can be
/// useful in FFI contexts where the caller doesn't know how to deal with
/// RAII.
pub unsafe fn force_read_decrement(&self) {
debug_assert!(self.lock.load(Ordering::Relaxed) & (!USIZE_MSB) > 0);
self.lock.fetch_sub(1, Ordering::SeqCst);
}
/// Force unlock exclusive write access.
///
/// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
/// live, or if called when there are current readers, but can be useful in
/// FFI contexts where the caller doesn't know how to deal with RAII.
pub unsafe fn force_write_unlock(&self) {
debug_assert_eq!(self.lock.load(Ordering::Relaxed), USIZE_MSB);
self.lock.store(0, Ordering::Relaxed);
}
/// Lock this rwlock with exclusive write access, blocking the current
/// thread until it can be acquired.
///
/// This function will not return while other writers or other readers
/// currently have access to the lock.
///
/// Returns an RAII guard which will drop the write access of this rwlock
/// when dropped.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// let mut data = mylock.write();
/// // The lock is now locked and the data can be written
/// *data += 1;
/// // The lock is dropped
/// }
/// ```
#[inline]
pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T>
{
loop
{
// Old value, with write bit unset.
let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
// Old value, with write bit set.
let new = USIZE_MSB | old;
if self.lock.compare_and_swap(old,
new,
Ordering::SeqCst) == old
{
// Wait for readers to go away, then lock is ours.
while self.lock.load(Ordering::Relaxed) != USIZE_MSB {
cpu_relax();
}
break
}
}
RwLockWriteGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
}
}
/// Attempt to lock this rwlock with exclusive write access.
///
/// This function does not ever block, and it will return `None` if a call
/// to `write` would otherwise block. If successful, an RAII guard is
/// returned.
///
/// ```
/// let mylock = spin::RwLock::new(0);
/// {
/// match mylock.try_write() {
/// Some(mut data) => {
/// // The lock is now locked and the data can be written
/// *data += 1;
/// // The lock is implicitly dropped
/// },
/// None => (), // no cigar
/// };
/// }
/// ```
#[inline]
pub fn try_write(&self) -> Option<RwLockWriteGuard<T>>
{
if self.lock.compare_and_swap(0,
USIZE_MSB,
Ordering::SeqCst) == 0
{
Some(RwLockWriteGuard {
lock: &self.lock,
data: unsafe { &mut *self.data.get() },
})
} else {
None
}
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
{
match self.try_read()
{
Some(guard) => write!(f, "RwLock {{ data: ")
.and_then(|()| (&*guard).fmt(f))
.and_then(|()| write!(f, "}}")),
None => write!(f, "RwLock {{ <locked> }}"),
}
}
}
impl<T: ?Sized + Default> Default for RwLock<T> {
fn default() -> RwLock<T> {
RwLock::new(Default::default())
}
}
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
type Target = T;
fn deref(&self) -> &T { self.data }
}
impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
fn deref_mut(&mut self) -> &mut T { self.data }
}
impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert!(self.lock.load(Ordering::Relaxed) & (!USIZE_MSB) > 0);
self.lock.fetch_sub(1, Ordering::SeqCst);
}
}
impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
fn drop(&mut self) {
debug_assert_eq!(self.lock.load(Ordering::Relaxed), USIZE_MSB);
self.lock.store(0, Ordering::Relaxed);
}
}
#[cfg(test)]
mod tests {
use std::prelude::v1::*;
use std::sync::Arc;
use std::sync::mpsc::channel;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::thread;
use super::*;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
#[test]
fn smoke() {
let l = RwLock::new(());
drop(l.read());
drop(l.write());
drop((l.read(), l.read()));
drop(l.write());
}
// TODO: needs RNG
//#[test]
//fn frob() {
// static R: RwLock = RwLock::new();
// const N: usize = 10;
// const M: usize = 1000;
//
// let (tx, rx) = channel::<()>();
// for _ in 0..N {
// let tx = tx.clone();
// thread::spawn(move|| {
// let mut rng = rand::thread_rng();
// for _ in 0..M {
// if rng.gen_weighted_bool(N) {
// drop(R.write());
// } else {
// drop(R.read());
// }
// }
// drop(tx);
// });
// }
// drop(tx);
// let _ = rx.recv();
// unsafe { R.destroy(); }
//}
#[test]
fn test_rw_arc() {
let arc = Arc::new(RwLock::new(0));
let arc2 = arc.clone();
let (tx, rx) = channel();
thread::spawn(move|| {
let mut lock = arc2.write();
for _ in 0..10 {
let tmp = *lock;
*lock = -1;
thread::yield_now();
*lock = tmp + 1;
}
tx.send(()).unwrap();
});
// Readers try to catch the writer in the act
let mut children = Vec::new();
for _ in 0..5 {
let arc3 = arc.clone();
children.push(thread::spawn(move|| {
let lock = arc3.read();
assert!(*lock >= 0);
}));
}
// Wait for children to pass their asserts
for r in children {
assert!(r.join().is_ok());
}
// Wait for writer to finish
rx.recv().unwrap();
let lock = arc.read();
assert_eq!(*lock, 10);
}
#[test]
fn test_rw_arc_access_in_unwind() {
let arc = Arc::new(RwLock::new(1));
let arc2 = arc.clone();
let _ = thread::spawn(move|| -> () {
struct Unwinder {
i: Arc<RwLock<isize>>,
}
impl Drop for Unwinder {
fn drop(&mut self) {
let mut lock = self.i.write();
*lock += 1;
}
}
let _u = Unwinder { i: arc2 };
panic!();
}).join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
#[test]
fn test_rwlock_unsized() {
let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
{
let b = &mut *rw.write();
b[0] = 4;
b[2] = 5;
}
let comp: &[i32] = &[4, 2, 5];
assert_eq!(&*rw.read(), comp);
}
#[test]
fn test_rwlock_try_write() {
use std::mem::drop;
let lock = RwLock::new(0isize);
let read_guard = lock.read();
let write_result = lock.try_write();
match write_result {
None => (),
Some(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
}
drop(read_guard);
}
#[test]
fn test_into_inner() {
let m = RwLock::new(NonCopy(10));
assert_eq!(m.into_inner(), NonCopy(10));
}
#[test]
fn test_into_inner_drop() {
struct Foo(Arc<AtomicUsize>);
impl Drop for Foo {
fn drop(&mut self) {
self.0.fetch_add(1, Ordering::SeqCst);
}
}
let num_drops = Arc::new(AtomicUsize::new(0));
let m = RwLock::new(Foo(num_drops.clone()));
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
{
let _inner = m.into_inner();
assert_eq!(num_drops.load(Ordering::SeqCst), 0);
}
assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}
#[test]
fn test_force_read_decrement() {
let m = RwLock::new(());
::std::mem::forget(m.read());
::std::mem::forget(m.read());
::std::mem::forget(m.read());
assert!(m.try_write().is_none());
unsafe {
m.force_read_decrement();
m.force_read_decrement();
}
assert!(m.try_write().is_none());
unsafe {
m.force_read_decrement();
}
assert!(m.try_write().is_some());
}
#[test]
fn test_force_write_unlock() {
let m = RwLock::new(());
::std::mem::forget(m.write());
assert!(m.try_read().is_none());
unsafe {
m.force_write_unlock();
}
assert!(m.try_read().is_some());
}
}
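
Note: the whole `RwLock` state above lives in a single `AtomicUsize`: the most significant bit (`USIZE_MSB`) marks a writer and the remaining bits count active readers, which is why `write` first sets the bit and then spins until the reader count drains to zero. A small illustrative sketch of that encoding using plain integers (no atomics; this code is not part of the diff):

fn main() {
    // Mirrors the constant in rw_lock.rs: only the top bit is the writer flag.
    const USIZE_MSB: usize = ::std::isize::MIN as usize;

    let unlocked: usize = 0;            // no writer bit, zero readers
    let two_readers: usize = 2;         // low bits hold the shared reader count
    let writer_held: usize = USIZE_MSB; // writer bit set; readers must be zero

    assert_eq!(unlocked & USIZE_MSB, 0);
    assert_eq!(two_readers & !USIZE_MSB, 2);
    assert_ne!(writer_held & USIZE_MSB, 0);
}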


@@ -30,10 +30,22 @@
//! ```rust,no_run
//! use qadapt::no_alloc;
//!
//! // This function is fine, there are no allocations here
//! #[no_alloc]
//! fn do_math() -> u8 {
//! 2 + 2
//! }
//!
//! // This function will trigger a panic when called
//! #[no_alloc]
//! fn does_panic() -> Box<u32> {
//! Box::new(5)
//! }
//!
//! fn main() {
//! do_math();
//! does_panic();
//! }
//! ```
//!
//! 2. Evaluate expressions with the `assert_no_alloc!` macro
@@ -60,7 +72,7 @@ pub use qadapt_macro::*;
use libc::c_void;
use libc::free;
use libc::malloc;
use spin::RwLock;
use qadapt_spin::RwLock;
use std::alloc::GlobalAlloc;
use std::alloc::Layout;
use std::thread;
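
Note: the documentation hunk above also lists a second usage mode, the `assert_no_alloc!` macro, which evaluates an expression and panics if that evaluation allocates. A minimal sketch of how a caller might use it; registering qadapt as the global allocator is assumed and omitted, and none of this code appears in the diff:

#[macro_use]
extern crate qadapt;

fn main() {
    // No allocation happens while evaluating the expression, so the value
    // is returned normally.
    let x = assert_no_alloc!(2 + 2);
    assert_eq!(x, 4);

    // An allocating expression such as `assert_no_alloc!(Box::new(5))`
    // would panic instead of returning.
}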