mirror of https://github.com/bspeice/qadapt synced 2025-07-01 13:56:14 -04:00

Much simpler take, and actually works this time.

2018-11-05 21:58:33 -05:00
parent a7c7571b49
commit 03310c6372
8 changed files with 79 additions and 232 deletions

src/const_init.rs
View File

@@ -1,5 +0,0 @@
-/// Anything that can be initialized with a `const` value.
-pub(crate) trait ConstInit {
-    /// The `const` default initializer value for `Self`.
-    const INIT: Self;
-}
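For context: a `#[global_allocator]` static must be built from a `const` expression, which is what this trait provided. A minimal sketch of the pattern, using a hypothetical `MyAlloc` that is not part of this repo:

// The trait as deleted above, plus a toy implementor.
pub(crate) trait ConstInit {
    const INIT: Self;
}

struct MyAlloc; // hypothetical; a real one would also implement GlobalAlloc

impl ConstInit for MyAlloc {
    const INIT: MyAlloc = MyAlloc;
}

// A global allocator could then be declared as:
// #[global_allocator]
// static ALLOC: MyAlloc = MyAlloc::INIT;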

src/lib.rs
View File

@@ -1,137 +1,68 @@
-#![no_std]
-#![feature(alloc)]
-
-extern crate alloc;
 extern crate backtrace;
 extern crate libc;
 #[macro_use]
 extern crate log;
 extern crate spin;
 
-use alloc::collections::btree_map::BTreeMap;
 use backtrace::Backtrace;
 use libc::c_void;
 use libc::free;
 use libc::malloc;
-use core::alloc::Layout;
-use core::alloc::GlobalAlloc;
-use core::sync::atomic::AtomicBool;
-use core::sync::atomic::Ordering;
+use log::Level;
+use std::alloc::Layout;
+use std::alloc::GlobalAlloc;
 use spin::RwLock;
 
-mod const_init;
-use const_init::ConstInit;
+static DO_PANIC: RwLock<bool> = RwLock::new(false);
+static INTERNAL_ALLOCATION: RwLock<bool> = RwLock::new(false);
+static LOG_LEVEL: RwLock<Level> = RwLock::new(Level::Debug);
 
-mod thread_id;
+pub struct QADAPT;
 
-// TODO: Doesn't check for race conditions
-static INTERNAL_ALLOCATION: AtomicBool = AtomicBool::new(false);
-
-pub struct QADAPTInternal {
-    pub thread_has_allocated: BTreeMap<usize, AtomicBool>,
-    pub recording_enabled: BTreeMap<usize, AtomicBool>
+pub fn set_panic(b: bool) {
+    *DO_PANIC.write() = b;
 }
 
-pub struct QADAPT {
-    internal: spin::Once<RwLock<QADAPTInternal>>
+pub fn set_log_level(level: Level) {
+    *LOG_LEVEL.write() = level;
 }
 
-impl ConstInit for QADAPT {
-    const INIT: QADAPT = QADAPT {
-        internal: spin::Once::new()
-    };
-}
-
 unsafe impl GlobalAlloc for QADAPT {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        if !INTERNAL_ALLOCATION.load(Ordering::SeqCst) {
-            let tid = thread_id::get();
+        // Use a block to release the read guard
+        let should_panic = { *DO_PANIC.read() };
 
-            // Need to use RAII guard because record_allocation() needs write access
-            let should_record = {
-                let internal = self.internal().read();
-                internal.recording_enabled.contains_key(&tid)
-                    && internal.recording_enabled.get(&tid).unwrap().load(Ordering::SeqCst)
-            };
+        if should_panic && !*INTERNAL_ALLOCATION.read() {
+            // Only trip one panic at a time, don't want to cause issues on potential rewind
+            *DO_PANIC.write() = false;
+            panic!("Unexpected allocation")
+        } else if log_enabled!(*LOG_LEVEL.read()) {
+            // We wrap in a block because we need to release the write guard
+            // so allocations during `Backtrace::new()` can read
+            { *INTERNAL_ALLOCATION.write() = true; }
 
-            if should_record {
-                self.record_allocation(thread_id::get())
-            }
+            let bt = Backtrace::new();
+            log!(*LOG_LEVEL.read(), "Unexpected allocation:\n{:?}", bt);
+
+            *INTERNAL_ALLOCATION.write() = false;
         }
 
         malloc(layout.size()) as *mut u8
     }
 
     unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+        if *DO_PANIC.read() && !*INTERNAL_ALLOCATION.read() {
+            panic!("Unexpected drop")
+        } else if log_enabled!(*LOG_LEVEL.read()) {
+            // We wrap in a block because we need to release the write guard
+            // so allocations during `Backtrace::new()` can read
+            { *INTERNAL_ALLOCATION.write() = true; }
+
+            let bt = Backtrace::new();
+            log!(*LOG_LEVEL.read(), "Unexpected drop:\n{:?}", bt);
+
+            *INTERNAL_ALLOCATION.write() = false;
+        }
         free(ptr as *mut c_void)
     }
 }
-
-impl QADAPT {
-    pub const INIT: Self = <Self as ConstInit>::INIT;
-
-    fn internal(&self) -> &RwLock<QADAPTInternal> {
-        self.internal.call_once(|| {
-            INTERNAL_ALLOCATION.store(true, Ordering::SeqCst);
-            let q = QADAPTInternal {
-                thread_has_allocated: BTreeMap::new(),
-                recording_enabled: BTreeMap::new()
-            };
-            INTERNAL_ALLOCATION.store(false, Ordering::SeqCst);
-
-            RwLock::new(q)
-        })
-    }
-
-    pub fn reset_allocation_state(&self) {
-        let internal = self.internal().write();
-
-        for (_tid, has_allocated) in &internal.thread_has_allocated {
-            has_allocated.store(false, Ordering::SeqCst);
-        }
-
-        for (_tid, recording_enabled) in &internal.recording_enabled {
-            recording_enabled.store(false, Ordering::SeqCst);
-        }
-    }
-
-    pub fn has_allocated_current(&self) -> bool {
-        let tid = thread_id::get();
-        let internal = self.internal().read();
-
-        // UNWRAP: Already checked for existence
-        internal.thread_has_allocated.contains_key(&tid)
-            && internal.thread_has_allocated.get(&tid).unwrap().load(Ordering::SeqCst)
-    }
-
-    pub fn record_allocation(&self, thread_id: usize) {
-        let mut internal = self.internal().write();
-
-        if internal.thread_has_allocated.contains_key(&thread_id) {
-            // UNWRAP: Already checked for existence
-            internal.thread_has_allocated.get(&thread_id)
-                .unwrap().store(true, Ordering::SeqCst)
-        }
-        else {
-            INTERNAL_ALLOCATION.store(true, Ordering::SeqCst);
-            internal.thread_has_allocated.insert(thread_id, AtomicBool::new(true));
-            INTERNAL_ALLOCATION.store(false, Ordering::SeqCst);
-        }
-    }
-
-    pub fn enable_recording_current(&self) {
-        self.enable_recording(thread_id::get());
-    }
-
-    pub fn enable_recording(&self, tid: usize) {
-        let mut internal = self.internal().write();
-
-        if internal.recording_enabled.contains_key(&tid) {
-            // UNWRAP: Already checked for existence
-            internal.recording_enabled.get(&tid).unwrap().store(true, Ordering::SeqCst);
-        }
-        else {
-            INTERNAL_ALLOCATION.store(true, Ordering::SeqCst);
-            internal.recording_enabled.insert(tid, AtomicBool::new(true));
-            INTERNAL_ALLOCATION.store(false, Ordering::SeqCst);
-        }
-    }
-}
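The public surface after this change is just `QADAPT`, `set_panic`, and `set_log_level`. A minimal sketch of how a consumer might drive it (the crate name `qadapt` is assumed from the repo, not taken from this diff):

extern crate qadapt;

use qadapt::{set_panic, QADAPT};

// Install the checking allocator for the whole program; this works because
// `QADAPT` is a unit struct and can be built in a `const` context.
#[global_allocator]
static ALLOC: QADAPT = QADAPT;

fn main() {
    let v = vec![1, 2, 3]; // fine: panicking is off by default
    set_panic(true);
    // vec![4, 5, 6]; // would trip panic!("Unexpected allocation")
    set_panic(false); // turn off before `v` is dropped, or the drop panics too
    drop(v);
}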

src/thread_id.rs
View File

@@ -1,6 +0,0 @@
-/// Taken from https://crates.io/crates/thread-id and re-purposed to be no-std safe
-use libc;
-
-pub fn get() -> usize {
-    unsafe { libc::pthread_self() as usize }
-}
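For reference, the deleted helper exercised end to end. This sketch inlines it so it stands alone, and relies on the POSIX guarantee that `pthread_self` values are unique among concurrently live threads:

extern crate libc;

// The deleted helper, copied verbatim from the file above.
fn get() -> usize {
    unsafe { libc::pthread_self() as usize }
}

fn main() {
    let main_id = get();
    // The child runs while `main` blocks in `join`, so both threads are
    // alive at once and must report different ids.
    let child_id = std::thread::spawn(get).join().unwrap();
    assert_ne!(main_id, child_id);
}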