
Multi-threaded allocation checker!

Holy crap that was much harder than expected
Bradlee Speice 2018-11-10 01:30:39 -05:00
parent 18d1c3e6d1
commit 753b3a4e8b
3 changed files with 87 additions and 65 deletions

Cargo.toml

@@ -9,7 +9,6 @@ categories = ["allocator", "nostd"]
 repository = "https://github.com/bspeice/qadapt.git"
 
 [dependencies]
-backtrace = "0.3"
 libc = "0.2"
 log = "0.4"
 spin = "0.4"

src/lib.rs

@@ -1,80 +1,102 @@
-extern crate backtrace;
 extern crate libc;
 extern crate qadapt_macro;
-#[macro_use]
-extern crate log;
 extern crate spin;
 
 // Re-export the proc macros to use by other code
 pub use qadapt_macro::*;
 
-use backtrace::Backtrace;
 use libc::c_void;
 use libc::free;
 use libc::malloc;
-use log::Level;
+use spin::Mutex;
 use std::alloc::Layout;
 use std::alloc::GlobalAlloc;
-use spin::RwLock;
+use std::sync::RwLock;
+use std::thread;
 
-static DO_PANIC: RwLock<bool> = RwLock::new(false);
-static INTERNAL_ALLOCATION: RwLock<bool> = RwLock::new(false);
-static LOG_LEVEL: RwLock<Level> = RwLock::new(Level::Debug);
+static THREAD_LOCAL_LOCK: Mutex<()> = Mutex::new(());
+thread_local! {
+    static PROTECTION_LEVEL: RwLock<u32> = RwLock::new(0);
+}
 
 pub struct QADAPT;
 
-pub fn set_panic(b: bool) {
-    let mut val = DO_PANIC.write();
-    if *val == b {
-        let level = LOG_LEVEL.read();
-        if log_enabled!(*level) {
-            log!(*level, "Panic flag was already {}, potential data race", b)
-        }
-    }
-    *val = b;
-}
-
-pub fn set_log_level(level: Level) {
-    *LOG_LEVEL.write() = level;
-}
+pub fn enter_protected() {
+    if thread::panicking() {
+        return
+    }
+
+    PROTECTION_LEVEL.try_with(|v| {
+        *v.write().unwrap() += 1;
+    }).unwrap_or_else(|_e| ());
+}
+
+pub fn exit_protected() {
+    if thread::panicking() {
+        return
+    }
+
+    PROTECTION_LEVEL.try_with(|v| {
+        let val = { *v.read().unwrap() };
+        match val {
+            v if v == 0 => panic!("Attempt to exit protected too many times"),
+            _ => {
+                *v.write().unwrap() -= 1;
+            }
+        }
+    }).unwrap_or_else(|_e| ());
+}
 
 unsafe impl GlobalAlloc for QADAPT {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        // Use a block to release the read guard
-        let should_panic = { *DO_PANIC.read() };
-
-        if should_panic && !*INTERNAL_ALLOCATION.read() {
-            // Only trip one panic at a time, don't want to cause issues on potential rewind
-            *DO_PANIC.write() = false;
-            panic!("Unexpected allocation")
-        } else if log_enabled!(*LOG_LEVEL.read()) {
-            // We wrap in a block because we need to release the write guard
-            // so allocations during `Backtrace::new()` can read
-            { *INTERNAL_ALLOCATION.write() = true; }
-            let bt = Backtrace::new();
-            log!(*LOG_LEVEL.read(), "Unexpected allocation:\n{:?}", bt);
-            *INTERNAL_ALLOCATION.write() = false;
-        }
-        malloc(layout.size()) as *mut u8
+        // If we're attempting to allocate our PROTECTION_LEVEL thread local,
+        // just allow it through
+        if thread::panicking() || THREAD_LOCAL_LOCK.try_lock().is_none() {
+            return malloc(layout.size()) as *mut u8;
+        }
+
+        let protection_level: Result<u32, ()> = {
+            let _lock = THREAD_LOCAL_LOCK.lock();
+            PROTECTION_LEVEL.try_with(|v| *v.read().unwrap())
+                .or(Ok(0))
+        };
+
+        match protection_level {
+            Ok(v) if v == 0 => malloc(layout.size()) as *mut u8,
+            Ok(v) => {
+                // Tripped a bad allocation, but make sure further allocation/deallocation
+                // during unwind doesn't have issues
+                PROTECTION_LEVEL.with(|v| *v.write().unwrap() = 0);
+                panic!("Unexpected allocation for size {}, protection level: {}", layout.size(), v)
+            }
+            Err(_) => {
+                // It shouldn't be possible to reach this point...
+                panic!("Unexpected error for fetching protection level")
+            }
+        }
     }
 
-    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
-        if *DO_PANIC.read() && !*INTERNAL_ALLOCATION.read() {
-            panic!("Unexpected drop")
-        } else if log_enabled!(*LOG_LEVEL.read()) {
-            // We wrap in a block because we need to release the write guard
-            // so allocations during `Backtrace::new()` can read
-            { *INTERNAL_ALLOCATION.write() = true; }
-            let bt = Backtrace::new();
-            log!(*LOG_LEVEL.read(), "Unexpected drop:\n{:?}", bt);
-            *INTERNAL_ALLOCATION.write() = false;
-        }
-        free(ptr as *mut c_void)
+    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+        if thread::panicking() || THREAD_LOCAL_LOCK.try_lock().is_none() {
+            return free(ptr as *mut c_void);
+        }
+
+        let protection_level: Result<u32, ()> = {
+            let _lock = THREAD_LOCAL_LOCK.lock();
+            PROTECTION_LEVEL.try_with(|v| *v.read().unwrap())
+                .or(Ok(0))
+        };
+
+        free(ptr as *mut c_void);
+        match protection_level {
+            Ok(v) if v > 0 => {
+                // Tripped a bad dealloc, but make sure further memory access
+                // during unwind doesn't have issues
+                PROTECTION_LEVEL.with(|v| *v.write().unwrap() = 0);
+                panic!("Unexpected deallocation for size {}, protection level: {}", layout.size(), v)
+            },
+            _ => ()
+        }
     }
 }
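
The rewrite replaces the global set_panic flag with a per-thread counter: enter_protected() increments the calling thread's PROTECTION_LEVEL, exit_protected() decrements it, and the allocator panics only when the thread doing the allocation has a level above zero. THREAD_LOCAL_LOCK lets the allocator wave through re-entrant allocations that happen while the thread-local itself is being read or initialized. A minimal usage sketch (hypothetical example, not part of this commit, assuming the crate as built above):

extern crate qadapt;

use qadapt::{enter_protected, exit_protected, QADAPT};

#[global_allocator]
static Q: QADAPT = QADAPT;

fn main() {
    enter_protected();      // heap use on this thread now panics
    let x = 0u8;            // stack-only work is fine
    exit_protected();       // allocation is allowed again
    let _b = Box::new(x);   // fine here; would have panicked above
}

Because the level is a counter rather than a flag, protected regions nest: a second enter_protected() requires a second exit_protected(), and exiting more times than entering panics with "Attempt to exit protected too many times".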

tests (integration test file)

@@ -2,7 +2,8 @@
 extern crate qadapt;
 
 use qadapt::QADAPT;
-use qadapt::set_panic;
+use qadapt::enter_protected;
+use qadapt::exit_protected;
 
 #[global_allocator]
 static Q: QADAPT = QADAPT;
@@ -15,17 +16,17 @@ pub fn black_box<T>(dummy: T) -> T {
 #[test]
 fn test_copy() {
-    set_panic(true);
+    enter_protected();
     black_box(0u8);
-    set_panic(false);
+    exit_protected();
 }
 
 #[test]
 #[should_panic]
 fn test_allocate() {
-    set_panic(true);
+    enter_protected();
     let _x = Box::new(12);
-    set_panic(false);
+    exit_protected();
 }
 fn unit_result(b: bool) -> Result<(), ()> {
@@ -38,51 +39,51 @@ fn unit_result(b: bool) -> Result<(), ()> {
 #[test]
 fn test_unit_result() {
-    set_panic(true);
+    enter_protected();
     #[allow(unused)]
     { black_box(unit_result(true)); }
     black_box(unit_result(true)).unwrap();
     #[allow(unused)]
     { black_box(unit_result(false)); }
     black_box(unit_result(false)).unwrap_err();
-    set_panic(false);
+    exit_protected();
 }
 
 #[test]
 #[should_panic]
 fn test_vec_push() {
     let mut v = Vec::new();
-    set_panic(true);
+    enter_protected();
     v.push(0);
 }
 
 #[test]
 fn test_vec_push_capacity() {
     let mut v = Vec::with_capacity(1);
-    set_panic(true);
+    enter_protected();
     v.push(0);
     v.pop();
     v.push(0);
-    set_panic(false);
+    exit_protected();
 }
 
 #[test]
 fn test_vec_with_zero() {
-    set_panic(true);
+    enter_protected();
     let _v: Vec<u8> = black_box(Vec::with_capacity(0));
-    set_panic(false);
+    exit_protected();
 }
 
 #[test]
 fn test_vec_new() {
-    set_panic(true);
+    enter_protected();
     let _v: Vec<u8> = black_box(Vec::new());
-    set_panic(false);
+    exit_protected();
 }
 
 #[test]
 #[should_panic]
 fn test_vec_with_one() {
-    set_panic(true);
+    enter_protected();
     let _v: Vec<u8> = Vec::with_capacity(1);
 }