//! The Quick And Dirty Allocation Profiling Tool
//!
//! This allocator is a helper for writing high-performance code that is allocation/drop free;
//! for functions annotated with `#[allocate_panic]`, QADAPT will detect when allocations/drops
//! happen during their execution (and execution of any functions they call) and throw a
//! thread panic if this occurs.
//!
//! Because QADAPT panics on allocation and is rather slow (for an allocator) it is **strongly**
//! recommended that QADAPT (the allocator) be used only in code tests. Functions annotated with
//! `#[allocate_panic]` will have no side effects if the QADAPT allocator is not being used,
//! so the attribute is safe to leave everywhere.
//!
//! Currently this crate is Nightly-only, but will work once `const fn` is in Stable.
#![deny(missing_docs)]

extern crate libc;
extern crate qadapt_macro;
extern crate spin;
// thread_id is necessary because `std::thread::current()` panics if we have not yet
// allocated a `thread_local!{}` it depends on.
extern crate thread_id;

// Re-export the proc macros to use by other code
pub use qadapt_macro::*;

use libc::c_void;
use libc::free;
use libc::malloc;
use spin::RwLock;
use std::alloc::Layout;
use std::alloc::GlobalAlloc;
use std::thread;
thread_local! {
    // Depth of nested protected regions for the current thread; allocations and
    // drops are only flagged when this is non-zero. Incremented by
    // `enter_protected`, decremented by `exit_protected`.
    static PROTECTION_LEVEL: RwLock<usize> = RwLock::new(0);
}
|
2018-09-23 12:37:07 -04:00
|
|
|
|
2018-11-10 21:36:23 -05:00
|
|
|
/// The QADAPT allocator itself
///
/// A zero-sized type: all tracking state lives in the `PROTECTION_LEVEL`
/// thread-local and the `INTERNAL_ALLOCATION` static. It implements
/// `GlobalAlloc` (see below), so it is intended to be installed as the
/// program's global allocator.
pub struct QADAPT;
|
2018-09-22 16:13:36 -04:00
|
|
|
|
2018-11-10 21:36:23 -05:00
|
|
|
/// Let QADAPT know that we are now entering a protected region and that
|
|
|
|
/// panics should be triggered if allocations/drops happen while we are running.
|
2018-11-10 01:30:39 -05:00
|
|
|
pub fn enter_protected() {
|
|
|
|
if thread::panicking() {
|
|
|
|
return
|
2018-11-06 20:51:44 -05:00
|
|
|
}
|
|
|
|
|
2018-11-10 01:30:39 -05:00
|
|
|
PROTECTION_LEVEL.try_with(|v| {
|
2018-11-10 20:54:35 -05:00
|
|
|
*v.write() += 1;
|
2018-11-10 01:30:39 -05:00
|
|
|
}).unwrap_or_else(|_e| ());
|
2018-09-21 22:34:42 -04:00
|
|
|
}
|
|
|
|
|
2018-11-10 21:36:23 -05:00
|
|
|
/// Let QADAPT know that we are exiting a protected region. Will panic
|
|
|
|
/// if we attempt to [`exit_protected`] more times than we [`enter_protected`].
|
2018-11-10 01:30:39 -05:00
|
|
|
pub fn exit_protected() {
|
|
|
|
if thread::panicking() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
PROTECTION_LEVEL.try_with(|v| {
|
2018-11-10 20:54:35 -05:00
|
|
|
let val = { *v.read() };
|
2018-11-10 01:30:39 -05:00
|
|
|
match val {
|
|
|
|
v if v == 0 => panic!("Attempt to exit protected too many times"),
|
|
|
|
_ => {
|
2018-11-10 20:54:35 -05:00
|
|
|
*v.write() -= 1;
|
2018-11-10 01:30:39 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}).unwrap_or_else(|_e| ());
|
2018-09-22 17:26:52 -04:00
|
|
|
}
|
|
|
|
|
2018-11-10 21:54:13 -05:00
|
|
|
// Sentinel guarding QADAPT's own bookkeeping allocations: holds
// `usize::max_value()` when unclaimed, otherwise the `thread_id` of the thread
// currently allowed to allocate internally (see `claim_internal_alloc`,
// `release_internal_alloc`, and `alloc_immediate`).
static INTERNAL_ALLOCATION: RwLock<usize> = RwLock::new(usize::max_value());
|
2018-11-10 20:54:35 -05:00
|
|
|
|
2018-11-10 21:54:13 -05:00
|
|
|
unsafe fn claim_internal_alloc() {
|
2018-11-10 20:54:35 -05:00
|
|
|
loop {
|
|
|
|
match INTERNAL_ALLOCATION.write() {
|
2018-11-10 21:54:13 -05:00
|
|
|
ref mut lock if **lock == usize::max_value() => {
|
|
|
|
**lock = thread_id::get();
|
2018-11-10 20:54:35 -05:00
|
|
|
break
|
|
|
|
},
|
|
|
|
_ => ()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-10 21:54:13 -05:00
|
|
|
unsafe fn release_internal_alloc() {
|
2018-11-10 20:54:35 -05:00
|
|
|
match INTERNAL_ALLOCATION.write() {
|
2018-11-10 21:54:13 -05:00
|
|
|
ref mut lock if **lock == thread_id::get() => **lock = usize::max_value(),
|
2018-11-10 20:54:35 -05:00
|
|
|
_ => panic!("Internal allocation tracking error")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
unsafe fn alloc_immediate() -> bool {
|
2018-11-10 21:54:13 -05:00
|
|
|
thread::panicking() || *INTERNAL_ALLOCATION.read() == thread_id::get()
|
2018-11-10 20:54:35 -05:00
|
|
|
}
|
|
|
|
|
2018-09-21 22:34:42 -04:00
|
|
|
unsafe impl GlobalAlloc for QADAPT {
    /// Allocate `layout.size()` bytes via libc `malloc`, panicking instead if
    /// the current thread is inside a protected region (PROTECTION_LEVEL > 0).
    ///
    /// NOTE(review): `layout.align()` is never consulted — `malloc` only
    /// guarantees the platform's default alignment, so over-aligned types may
    /// receive misaligned memory. Confirm whether `posix_memalign` /
    /// `aligned_alloc` is needed here.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // If we're attempting to allocate our PROTECTION_LEVEL thread local,
        // just allow it through (alloc_immediate is also true while panicking
        // or while this thread holds the internal sentinel).
        if alloc_immediate() {
            return malloc(layout.size()) as *mut u8;
        }

        // Because accessing PROTECTION_LEVEL has the potential to trigger an allocation,
        // we need to spin until we can claim the INTERNAL_ALLOCATION lock for our thread.
        claim_internal_alloc();
        // `try_with` fails if the thread-local is already torn down; `.or(Ok(0))`
        // treats that as "unprotected" so late allocations still succeed.
        let protection_level: Result<usize, ()> = PROTECTION_LEVEL.try_with(|v| *v.read()).or(Ok(0));
        release_internal_alloc();

        match protection_level {
            Ok(v) if v == 0 => malloc(layout.size()) as *mut u8,
            Ok(v) => {
                // Tripped a bad allocation, but make sure further memory access during unwind
                // doesn't have issues
                PROTECTION_LEVEL.with(|v| *v.write() = 0);
                panic!("Unexpected allocation for size {}, protection level: {}", layout.size(), v)
            },
            // Impossible: `.or(Ok(0))` above converts every Err into Ok.
            Err(_) => unreachable!()
        }
    }

    /// Release `ptr` via libc `free`, then panic if this drop happened inside a
    /// protected region.
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        // Fast path: unwinding threads and internal bookkeeping free immediately.
        if alloc_immediate() {
            return free(ptr as *mut c_void);
        }

        // Same sentinel dance as `alloc`: reading PROTECTION_LEVEL may itself
        // trigger an allocation.
        claim_internal_alloc();
        let protection_level: Result<usize, ()> = PROTECTION_LEVEL.try_with(|v| *v.read()).or(Ok(0));
        release_internal_alloc();

        // Free before checking panic to make sure we avoid leaks
        free(ptr as *mut c_void);
        match protection_level {
            Ok(v) if v > 0 => {
                // Tripped a bad dealloc, but make sure further memory access during unwind
                // doesn't have issues
                PROTECTION_LEVEL.with(|v| *v.write() = 0);
                panic!("Unexpected deallocation for size {}, protection level: {}", layout.size(), v)
            },
            _ => ()
        }
    }
}
|