extern crate backtrace;
extern crate libc;
extern crate qadapt_macro;
#[macro_use]
extern crate log;
extern crate spin;

// Re-export the proc macros for use by other code
pub use qadapt_macro::*;
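// Sketch of how a re-exported macro is intended to be used; the
// `allocate_panic` attribute name is an assumption about what
// `qadapt_macro` provides, not confirmed by this file:
//
//     #[allocate_panic]
//     fn does_not_allocate() -> u32 {
//         // the panic flag is raised on entry and cleared on exit,
//         // so any heap allocation in here would panic
//         42
//     }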
use backtrace::Backtrace;
use libc::c_void;
use libc::free;
use libc::malloc;
use log::Level;
use std::alloc::Layout;
use std::alloc::GlobalAlloc;
use spin::RwLock;

/// Flag controlling whether an unexpected allocation should trigger a panic
static DO_PANIC: RwLock<bool> = RwLock::new(false);
/// Flag set while QADAPT itself allocates (e.g. to build a backtrace)
/// so that internal allocations don't trip the panic or logging
static INTERNAL_ALLOCATION: RwLock<bool> = RwLock::new(false);
/// Level at which unexpected allocations and drops are logged
static LOG_LEVEL: RwLock<Level> = RwLock::new(Level::Debug);

/// Allocator that reports, and optionally panics on, unexpected allocations
pub struct QADAPT;

/// Set whether unexpected allocations should trigger a panic
pub fn set_panic(b: bool) {
    let mut val = DO_PANIC.write();
    if *val == b {
        let level = LOG_LEVEL.read();
        if log_enabled!(*level) {
            log!(*level, "Panic flag was already {}, potential data race", b)
        }
    }

    *val = b;
}
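// Minimal sketch of driving the panic flag by hand (requires QADAPT to be
// installed as the global allocator; see the example at the bottom):
//
//     set_panic(true);
//     let x = 0u64; // stack value: fine
//     // let b = Box::new(0u64); // heap allocation: would panic
//     set_panic(false);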

/// Set the level at which unexpected allocations and drops are logged
pub fn set_log_level(level: Level) {
    *LOG_LEVEL.write() = level;
}
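// Sketch: report unexpected allocations at `info` instead of the
// default `debug`:
//
//     set_log_level(log::Level::Info);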

unsafe impl GlobalAlloc for QADAPT {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Use a block to release the read guard
        let should_panic = { *DO_PANIC.read() };

        if should_panic && !*INTERNAL_ALLOCATION.read() {
            // Only trip one panic at a time; we don't want to cause issues
            // during a potential unwind
            *DO_PANIC.write() = false;
            panic!("Unexpected allocation")
        } else if log_enabled!(*LOG_LEVEL.read()) && !*INTERNAL_ALLOCATION.read() {
            // We wrap in a block because we need to release the write guard
            // so allocations during `Backtrace::new()` can read. Checking
            // INTERNAL_ALLOCATION in the branch condition above also keeps
            // those allocations from recursing back into this branch.
            { *INTERNAL_ALLOCATION.write() = true; }

            let bt = Backtrace::new();
            log!(*LOG_LEVEL.read(), "Unexpected allocation:\n{:?}", bt);

            *INTERNAL_ALLOCATION.write() = false;
        }

        // NB: `malloc` only guarantees the platform's fundamental alignment;
        // requests with a larger `layout.align()` are not honored here
        malloc(layout.size()) as *mut u8
    }

    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        if *DO_PANIC.read() && !*INTERNAL_ALLOCATION.read() {
            panic!("Unexpected drop")
        } else if log_enabled!(*LOG_LEVEL.read()) && !*INTERNAL_ALLOCATION.read() {
            // We wrap in a block because we need to release the write guard
            // so allocations during `Backtrace::new()` can read; the
            // INTERNAL_ALLOCATION check above prevents recursion here too
            { *INTERNAL_ALLOCATION.write() = true; }

            let bt = Backtrace::new();
            log!(*LOG_LEVEL.read(), "Unexpected drop:\n{:?}", bt);

            *INTERNAL_ALLOCATION.write() = false;
        }

        free(ptr as *mut c_void)
    }
}
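
// Installing QADAPT as the global allocator (sketch, from downstream code):
//
//     use qadapt::QADAPT;
//
//     #[global_allocator]
//     static ALLOCATOR: QADAPT = QADAPT;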