// Mirror of https://github.com/bspeice/qadapt (qadapt/src/lib.rs), synced 2024-11-14.
//! # The Quick And Dirty Allocation Profiling Tool
//!
//! This allocator is a helper for writing high-performance code that is allocation/drop free;
//! for functions annotated with `#[allocate_panic]`, QADAPT will detect when allocations/drops
//! happen during their execution (and execution of any functions they call) and throw a
//! thread panic if this occurs. QADAPT-related code is *stripped out during release builds*,
//! so no worries about random allocations crashing in production.
//!
//! Currently this crate is Nightly-only, but will work once `const fn` is in Stable.
//!
//! Please also take a look at [qadapt-macro](https://github.com/bspeice/qadapt/tree/master/qadapt-macro)
//! for some helper macros to make working with QADAPT a bit easier.
#![deny(missing_docs)]
extern crate libc;
#[macro_use]
extern crate log;
extern crate qadapt_macro;
extern crate spin;
// thread_id is necessary because `std::thread::current()` panics if we have not yet
// allocated a `thread_local!{}` it depends on.
extern crate thread_id;
// Re-export the proc macros to use by other code
pub use qadapt_macro::*;
use libc::c_void;
use libc::free;
use libc::malloc;
use spin::RwLock;
use std::alloc::GlobalAlloc;
use std::alloc::Layout;
use std::thread;
thread_local! {
    // Per-thread count of `enter_protected()` calls not yet matched by
    // `exit_protected()`; 0 means allocations/drops are currently allowed.
    static PROTECTION_LEVEL: RwLock<usize> = RwLock::new(0);
}
/// The QADAPT allocator itself
///
/// Install it as the program's `#[global_allocator]`; the `GlobalAlloc` impl
/// in this file performs the actual allocation tracking.
pub struct QADAPT;
2018-11-10 21:36:23 -05:00
/// Let QADAPT know that we are now entering a protected region and that
/// panics should be triggered if allocations/drops happen while we are running.
pub fn enter_protected() {
2018-11-18 21:29:32 -05:00
#[cfg(debug_assertions)]
{
if thread::panicking() {
return;
}
2018-11-06 20:51:44 -05:00
if *IS_ACTIVE.read() == false {
*IS_ACTIVE.write() = true;
warn!("QADAPT not initialized when using allocation guards; please verify `#[global_allocator]` is set!");
}
2018-11-18 21:29:32 -05:00
PROTECTION_LEVEL
.try_with(|v| {
*v.write() += 1;
})
.unwrap_or_else(|_e| ());
}
}
2018-11-10 21:36:23 -05:00
/// Let QADAPT know that we are exiting a protected region. Will panic
/// if we attempt to [`exit_protected`] more times than we [`enter_protected`].
pub fn exit_protected() {
2018-11-18 21:29:32 -05:00
#[cfg(debug_assertions)]
{
if thread::panicking() {
return;
}
2018-11-18 21:29:32 -05:00
PROTECTION_LEVEL
.try_with(|v| {
let val = { *v.read() };
match val {
v if v == 0 => panic!("Attempt to exit protected too many times"),
_ => {
*v.write() -= 1;
}
2018-11-10 21:59:39 -05:00
}
2018-11-18 21:29:32 -05:00
})
.unwrap_or_else(|_e| ());
}
}
// Set to true the first time the QADAPT allocator's `alloc` runs;
// `enter_protected` warns when it is still false, since that means QADAPT
// was never installed as the `#[global_allocator]`.
static IS_ACTIVE: RwLock<bool> = RwLock::new(false);
// Thread id of the thread currently performing a QADAPT-internal allocation;
// `usize::max_value()` is the sentinel value meaning "unclaimed".
static INTERNAL_ALLOCATION: RwLock<usize> = RwLock::new(usize::max_value());
/// Get the current "protection level" in QADAPT: calls to enter_protected() - exit_protected()
pub fn protection_level() -> usize {
    #[cfg(debug_assertions)]
    {
        // `try_with` guards against the thread-local already being destroyed
        // (e.g. during thread teardown); report 0 rather than panicking.
        PROTECTION_LEVEL.try_with(|v| *v.read()).unwrap_or(0)
    }
    // In release builds all QADAPT bookkeeping is compiled out, so the
    // protection level is always reported as 0.
    #[cfg(not(debug_assertions))]
    {
        0
    }
}
fn claim_internal_alloc() {
loop {
match INTERNAL_ALLOCATION.write() {
2018-11-10 21:54:13 -05:00
ref mut lock if **lock == usize::max_value() => {
**lock = thread_id::get();
2018-11-10 21:59:39 -05:00
break;
}
_ => (),
}
}
}
fn release_internal_alloc() {
match INTERNAL_ALLOCATION.write() {
2018-11-10 21:54:13 -05:00
ref mut lock if **lock == thread_id::get() => **lock = usize::max_value(),
2018-11-10 21:59:39 -05:00
_ => panic!("Internal allocation tracking error"),
}
}
// True when an allocation/free should bypass all QADAPT bookkeeping: either
// this thread is unwinding from a panic, or it currently holds the
// internal-allocation claim (i.e. the request comes from QADAPT itself).
fn alloc_immediate() -> bool {
    thread::panicking() || *INTERNAL_ALLOCATION.read() == thread_id::get()
}
unsafe impl GlobalAlloc for QADAPT {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if *IS_ACTIVE.read() == false {
*IS_ACTIVE.write() = true;
}
// If we're attempting to allocate our PROTECTION_LEVEL thread local,
// just allow it through
if alloc_immediate() {
return malloc(layout.size()) as *mut u8;
}
2018-11-10 21:59:39 -05:00
// Because accessing PROTECTION_LEVEL has the potential to trigger an allocation,
// we need to spin until we can claim the INTERNAL_ALLOCATION lock for our thread.
claim_internal_alloc();
2018-11-10 21:59:39 -05:00
let protection_level: Result<usize, ()> =
PROTECTION_LEVEL.try_with(|v| *v.read()).or(Ok(0));
release_internal_alloc();
match protection_level {
Ok(v) if v == 0 => malloc(layout.size()) as *mut u8,
Ok(v) => {
2018-11-10 21:54:13 -05:00
// Tripped a bad allocation, but make sure further memory access during unwind
// doesn't have issues
PROTECTION_LEVEL.with(|v| *v.write() = 0);
2018-11-10 21:59:39 -05:00
panic!(
"Unexpected allocation for size {}, protection level: {}",
layout.size(),
v
)
}
Err(_) => unreachable!(),
}
}
2018-11-15 20:16:49 -05:00
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if alloc_immediate() {
return free(ptr as *mut c_void);
}
claim_internal_alloc();
2018-11-10 21:59:39 -05:00
let protection_level: Result<usize, ()> =
PROTECTION_LEVEL.try_with(|v| *v.read()).or(Ok(0));
release_internal_alloc();
2018-11-10 21:54:13 -05:00
// Free before checking panic to make sure we avoid leaks
free(ptr as *mut c_void);
match protection_level {
Ok(v) if v > 0 => {
// Tripped a bad dealloc, but make sure further memory access during unwind
// doesn't have issues
PROTECTION_LEVEL.with(|v| *v.write() = 0);
2018-11-10 21:59:39 -05:00
panic!(
"Unexpected deallocation for size {}, protection level: {}",
layout.size(),
v
)
}
_ => (),
2018-09-23 12:37:07 -04:00
}
}
}