mirror of https://github.com/bspeice/qadapt

Enable per-thread tracking

Bradlee Speice 2018-09-23 12:37:07 -04:00
parent a2f21fb462
commit 7fde099c3a
5 changed files with 114 additions and 32 deletions

Cargo.toml

@@ -6,5 +6,5 @@ description = "The Quick And Dirty Allocation Profiling Tool"
 license = "Apache-2.0"
 
 [dependencies]
+spin = "0.4"
 libc = { version = "0.2", default-features = false }
-spin = "0.4"

src/lib.rs

@@ -1,8 +1,11 @@
 #![no_std]
+#![feature(alloc)]
 
+extern crate alloc;
 extern crate libc;
 extern crate spin;
 
+use alloc::collections::btree_map::BTreeMap;
 use libc::c_void;
 use libc::free;
 use libc::malloc;
@@ -10,18 +13,23 @@ use core::alloc::Layout;
 use core::alloc::GlobalAlloc;
 use core::sync::atomic::AtomicBool;
 use core::sync::atomic::Ordering;
+use spin::RwLock;
 
 mod const_init;
 use const_init::ConstInit;
+mod thread_id;
 
+// TODO: Doesn't check for race conditions
 static INTERNAL_ALLOCATION: AtomicBool = AtomicBool::new(false);
 
 pub struct QADAPTInternal {
-    pub has_allocated: AtomicBool
+    pub thread_has_allocated: BTreeMap<usize, AtomicBool>,
+    pub recording_enabled: BTreeMap<usize, AtomicBool>
 }
 
 pub struct QADAPT {
-    internal: spin::Once<QADAPTInternal>
+    internal: spin::Once<RwLock<QADAPTInternal>>
 }
 
 impl ConstInit for QADAPT {
@@ -30,10 +38,23 @@ impl ConstInit for QADAPT {
     };
 }
 
 unsafe impl GlobalAlloc for QADAPT {
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
         if !INTERNAL_ALLOCATION.load(Ordering::SeqCst) {
-            self.internal().has_allocated.store(true, Ordering::SeqCst);
+            let tid = thread_id::get();
+
+            // Need to use RAII guard because record_allocation() needs write access
+            let should_record = {
+                let internal = self.internal().read();
+                internal.recording_enabled.contains_key(&tid)
+                    && internal.recording_enabled.get(&tid).unwrap().load(Ordering::SeqCst)
+            };
+
+            if should_record {
+                self.record_allocation(thread_id::get())
+            }
         }
 
         malloc(layout.size()) as *mut u8
@@ -47,24 +68,70 @@ unsafe impl GlobalAlloc for QADAPT {
 
 impl QADAPT {
     pub const INIT: Self = <Self as ConstInit>::INIT;
 
-    fn internal(&self) -> &QADAPTInternal {
+    fn internal(&self) -> &RwLock<QADAPTInternal> {
         self.internal.call_once(|| {
             INTERNAL_ALLOCATION.store(true, Ordering::SeqCst);
             let q = QADAPTInternal {
-                has_allocated: AtomicBool::new(false)
+                thread_has_allocated: BTreeMap::new(),
+                recording_enabled: BTreeMap::new()
             };
             INTERNAL_ALLOCATION.store(false, Ordering::SeqCst);
 
-            q
+            RwLock::new(q)
         })
     }
 
-    pub fn clear_allocations(&self) {
-        self.internal().has_allocated.store(false, Ordering::SeqCst);
+    pub fn reset_allocation_state(&self) {
+        let internal = self.internal().write();
+
+        for (_tid, has_allocated) in &internal.thread_has_allocated {
+            has_allocated.store(false, Ordering::SeqCst);
+        }
+        for (_tid, recording_enabled) in &internal.recording_enabled {
+            recording_enabled.store(false, Ordering::SeqCst);
+        }
     }
 
-    pub fn has_allocated(&self) -> bool {
-        self.internal().has_allocated.load(Ordering::SeqCst)
+    pub fn has_allocated_current(&self) -> bool {
+        let tid = thread_id::get();
+        let internal = self.internal().read();
+
+        // UNWRAP: Already checked for existence
+        internal.thread_has_allocated.contains_key(&tid)
+            && internal.thread_has_allocated.get(&tid).unwrap().load(Ordering::SeqCst)
+    }
+
+    pub fn record_allocation(&self, thread_id: usize) {
+        let mut internal = self.internal().write();
+        if internal.thread_has_allocated.contains_key(&thread_id) {
+            // UNWRAP: Already checked for existence
+            internal.thread_has_allocated.get(&thread_id)
+                .unwrap().store(true, Ordering::SeqCst)
+        }
+        else {
+            INTERNAL_ALLOCATION.store(true, Ordering::SeqCst);
+            internal.thread_has_allocated.insert(thread_id, AtomicBool::new(true));
+            INTERNAL_ALLOCATION.store(false, Ordering::SeqCst);
+        }
+    }
+
+    pub fn enable_recording_current(&self) {
+        self.enable_recording(thread_id::get());
+    }
+
+    pub fn enable_recording(&self, tid: usize) {
+        let mut internal = self.internal().write();
+
+        if internal.recording_enabled.contains_key(&tid) {
+            // UNWRAP: Already checked for existence
+            internal.recording_enabled.get(&tid).unwrap().store(true, Ordering::SeqCst);
+        }
+        else {
+            INTERNAL_ALLOCATION.store(true, Ordering::SeqCst);
+            internal.recording_enabled.insert(tid, AtomicBool::new(true));
+            INTERNAL_ALLOCATION.store(false, Ordering::SeqCst);
+        }
     }
 }
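As a sketch of how the new API fits together: the example below is not part of the commit; it assumes a downstream test or binary crate with std available and uses only the methods introduced above.

// Illustrative sketch only. QADAPT tracks allocations per thread, so a worker
// thread that opts in via enable_recording_current() is flagged independently
// of the main thread, which never enables recording here.
extern crate qadapt;

use qadapt::QADAPT;
use std::thread;

#[global_allocator]
static A: QADAPT = QADAPT::INIT;

fn main() {
    A.reset_allocation_state();

    let worker_allocated = thread::spawn(|| {
        A.enable_recording_current();
        let _b = Box::new(0u32); // allocation on the worker thread
        A.has_allocated_current()
    })
    .join()
    .unwrap();

    assert!(worker_allocated);
    // The main thread never called enable_recording_current(), so its own
    // allocations (e.g. the machinery behind thread::spawn) were not recorded.
    assert!(!A.has_allocated_current());
}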

src/thread_id.rs (new file)

@@ -0,0 +1,6 @@
+/// Taken from https://crates.io/crates/thread-id and re-purposed to be no-std safe
+use libc;
+
+pub fn get() -> usize {
+    unsafe { libc::pthread_self() as usize }
+}
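The pthread-based ID is only used as a BTreeMap key, so the property the allocator relies on is that it is stable within a thread and distinct between concurrently live threads. A hypothetical in-crate unit test sketching that property (not part of this commit; assumes a std-enabled test setup such as #[cfg(test)] extern crate std; in this no_std crate):

#[test]
fn thread_ids_stable_and_distinct() {
    // Same thread, same ID.
    let here = thread_id::get();
    assert_eq!(here, thread_id::get());

    // An ID observed on a concurrently live thread differs from this one.
    let there = std::thread::spawn(|| thread_id::get()).join().unwrap();
    assert_ne!(here, there);
}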

Allocation-tracking tests (file path not shown in this view)

@@ -3,7 +3,6 @@ extern crate qadapt;
 use qadapt::QADAPT;
 use std::alloc::alloc;
 use std::alloc::Layout;
-use std::sync::atomic::Ordering;
 
 #[global_allocator]
 static A: QADAPT = QADAPT::INIT;
@@ -24,23 +23,24 @@ struct NonEmpty {
 
 #[test]
 fn allocation_flag() {
-    A.clear_allocations();
-    assert!(!A.has_allocated());
+    A.reset_allocation_state();
+    A.enable_recording_current();
+    assert!(!A.has_allocated_current());
 
     let _x = 24;
-    assert!(!A.has_allocated());
+    assert!(!A.has_allocated_current());
 
     let _x = Empty {};
-    assert!(!A.has_allocated());
+    assert!(!A.has_allocated_current());
 
     let _x = NonEmpty {
         _x: 42,
         _y: 84
     };
-    assert!(!A.has_allocated());
+    assert!(!A.has_allocated_current());
 
     let _x = Box::new(42);
-    assert!(A.has_allocated());
+    assert!(A.has_allocated_current());
 }
 
 #[inline(never)]
@@ -48,11 +48,12 @@ fn no_op() {}
 
 #[test]
 fn no_alloc_during_noop() {
-    A.clear_allocations();
-    assert!(!A.has_allocated());
+    A.reset_allocation_state();
+    A.enable_recording_current();
+    assert!(!A.has_allocated_current());
 
     no_op();
-    assert!(!A.has_allocated());
+    assert!(!A.has_allocated_current());
 }
 
 #[inline(never)]
@@ -62,9 +63,19 @@ fn allocates() {
 
 #[test]
 fn alloc_during_func_call() {
-    A.clear_allocations();
-    assert!(!A.has_allocated());
+    A.reset_allocation_state();
+    A.enable_recording_current();
+    assert!(!A.has_allocated_current());
 
     allocates();
-    assert!(A.has_allocated());
+    assert!(A.has_allocated_current());
+}
+
+#[test]
+fn allocates_unrecorded() {
+    A.reset_allocation_state();
+    assert!(!A.has_allocated_current());
+
+    allocates();
+    assert!(!A.has_allocated_current());
 }

Initialization test (file path not shown in this view)

@@ -1,20 +1,18 @@
 extern crate qadapt;
 
 use qadapt::QADAPT;
-use std::sync::atomic::Ordering;
 
 #[global_allocator]
 static A: QADAPT = QADAPT::INIT;
 
 #[test]
 fn init() {
-    // Because the Allocator and its internals isn't the only "pre-main" allocation
-    // that happens, when starting up we expect to see that A has in fact allocated
-    assert!(A.has_allocated());
-    A.clear_allocations();
-    assert!(!A.has_allocated());
+    assert!(!A.has_allocated_current());
+    A.reset_allocation_state();
+    A.enable_recording_current();
+    assert!(!A.has_allocated_current());
 
     let _x = Box::new(42);
-    assert!(A.has_allocated());
+    assert!(A.has_allocated_current());
 }