From 7fde099c3a1a51be1c678f79f01dd7ab62fc7f72 Mon Sep 17 00:00:00 2001 From: Bradlee Speice Date: Sun, 23 Sep 2018 12:37:07 -0400 Subject: [PATCH] Enable per-thread tracking --- Cargo.toml | 4 +-- src/lib.rs | 87 ++++++++++++++++++++++++++++++++++++++++++------ src/thread_id.rs | 6 ++++ tests/basic.rs | 37 ++++++++++++-------- tests/initial.rs | 12 +++---- 5 files changed, 114 insertions(+), 32 deletions(-) create mode 100644 src/thread_id.rs diff --git a/Cargo.toml b/Cargo.toml index 04aaa82..0957fbe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,5 +6,5 @@ description = "The Quick And Dirty Allocation Profiling Tool" license = "Apache-2.0" [dependencies] -spin = "0.4" -libc = { version = "0.2", default-features = false } \ No newline at end of file +libc = { version = "0.2", default-features = false } +spin = "0.4" \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index c485a80..47ee66a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,8 +1,11 @@ #![no_std] +#![feature(alloc)] +extern crate alloc; extern crate libc; extern crate spin; +use alloc::collections::btree_map::BTreeMap; use libc::c_void; use libc::free; use libc::malloc; @@ -10,18 +13,23 @@ use core::alloc::Layout; use core::alloc::GlobalAlloc; use core::sync::atomic::AtomicBool; use core::sync::atomic::Ordering; +use spin::RwLock; mod const_init; use const_init::ConstInit; +mod thread_id; + +// TODO: Doesn't check for race conditions static INTERNAL_ALLOCATION: AtomicBool = AtomicBool::new(false); pub struct QADAPTInternal { - pub has_allocated: AtomicBool + pub thread_has_allocated: BTreeMap<usize, AtomicBool>, + pub recording_enabled: BTreeMap<usize, AtomicBool> } pub struct QADAPT { - internal: spin::Once<QADAPTInternal> + internal: spin::Once<RwLock<QADAPTInternal>> } impl ConstInit for QADAPT { @@ -30,10 +38,23 @@ }; } + + unsafe impl GlobalAlloc for QADAPT { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { if !INTERNAL_ALLOCATION.load(Ordering::SeqCst) { - self.internal().has_allocated.store(true, Ordering::SeqCst); + let tid = 
thread_id::get(); + + // Need to use RAII guard because record_allocation() needs write access + let should_record = { + let internal = self.internal().read(); + internal.recording_enabled.contains_key(&tid) + && internal.recording_enabled.get(&tid).unwrap().load(Ordering::SeqCst) + }; + + if should_record { + self.record_allocation(thread_id::get()) + } } malloc(layout.size()) as *mut u8 } @@ -47,24 +68,70 @@ unsafe impl GlobalAlloc for QADAPT { impl QADAPT { pub const INIT: Self = <Self as ConstInit>::INIT; - fn internal(&self) -> &QADAPTInternal { + fn internal(&self) -> &RwLock<QADAPTInternal> { self.internal.call_once(|| { INTERNAL_ALLOCATION.store(true, Ordering::SeqCst); let q = QADAPTInternal { - has_allocated: AtomicBool::new(false) + thread_has_allocated: BTreeMap::new(), + recording_enabled: BTreeMap::new() }; INTERNAL_ALLOCATION.store(false, Ordering::SeqCst); - q + RwLock::new(q) }) } - pub fn clear_allocations(&self) { - self.internal().has_allocated.store(false, Ordering::SeqCst); + pub fn reset_allocation_state(&self) { + let internal = self.internal().write(); + for (_tid, has_allocated) in &internal.thread_has_allocated { + + has_allocated.store(false, Ordering::SeqCst); + } + for (_tid, recording_enabled) in &internal.recording_enabled { + + recording_enabled.store(false, Ordering::SeqCst); + } } - pub fn has_allocated(&self) -> bool { - self.internal().has_allocated.load(Ordering::SeqCst) + pub fn has_allocated_current(&self) -> bool { + let tid = thread_id::get(); + let internal = self.internal().read(); + + // UNWRAP: Already checked for existence + internal.thread_has_allocated.contains_key(&tid) + && internal.thread_has_allocated.get(&tid).unwrap().load(Ordering::SeqCst) + } + + pub fn record_allocation(&self, thread_id: usize) { + let mut internal = self.internal().write(); + if internal.thread_has_allocated.contains_key(&thread_id) { + // UNWRAP: Already checked for existence + internal.thread_has_allocated.get(&thread_id) + .unwrap().store(true, Ordering::SeqCst) + } + else 
{ + INTERNAL_ALLOCATION.store(true, Ordering::SeqCst); + internal.thread_has_allocated.insert(thread_id, AtomicBool::new(true)); + INTERNAL_ALLOCATION.store(false, Ordering::SeqCst); + } + } + + pub fn enable_recording_current(&self) { + self.enable_recording(thread_id::get()); + } + + pub fn enable_recording(&self, tid: usize) { + let mut internal = self.internal().write(); + + if internal.recording_enabled.contains_key(&tid) { + // UNWRAP: Already checked for existence + internal.recording_enabled.get(&tid).unwrap().store(true, Ordering::SeqCst); + } + else { + INTERNAL_ALLOCATION.store(true, Ordering::SeqCst); + internal.recording_enabled.insert(tid, AtomicBool::new(true)); + INTERNAL_ALLOCATION.store(false, Ordering::SeqCst); + } } } diff --git a/src/thread_id.rs b/src/thread_id.rs new file mode 100644 index 0000000..d439208 --- /dev/null +++ b/src/thread_id.rs @@ -0,0 +1,6 @@ +/// Taken from https://crates.io/crates/thread-id and re-purposed to be no-std safe +use libc; + +pub fn get() -> usize { + unsafe { libc::pthread_self() as usize } +} \ No newline at end of file diff --git a/tests/basic.rs b/tests/basic.rs index e688ed8..3f22100 100644 --- a/tests/basic.rs +++ b/tests/basic.rs @@ -3,7 +3,6 @@ extern crate qadapt; use qadapt::QADAPT; use std::alloc::alloc; use std::alloc::Layout; -use std::sync::atomic::Ordering; #[global_allocator] static A: QADAPT = QADAPT::INIT; @@ -24,23 +23,24 @@ struct NonEmpty { #[test] fn allocation_flag() { - A.clear_allocations(); - assert!(!A.has_allocated()); + A.reset_allocation_state(); + A.enable_recording_current(); + assert!(!A.has_allocated_current()); let _x = 24; - assert!(!A.has_allocated()); + assert!(!A.has_allocated_current()); let _x = Empty {}; - assert!(!A.has_allocated()); + assert!(!A.has_allocated_current()); let _x = NonEmpty { _x: 42, _y: 84 }; - assert!(!A.has_allocated()); + assert!(!A.has_allocated_current()); let _x = Box::new(42); - assert!(A.has_allocated()); + assert!(A.has_allocated_current()); } 
#[inline(never)] @@ -48,11 +48,12 @@ fn no_op() {} #[test] fn no_alloc_during_noop() { - A.clear_allocations(); - assert!(!A.has_allocated()); + A.reset_allocation_state(); + A.enable_recording_current(); + assert!(!A.has_allocated_current()); no_op(); - assert!(!A.has_allocated()); + assert!(!A.has_allocated_current()); } #[inline(never)] @@ -62,9 +63,19 @@ fn allocates() { #[test] fn alloc_during_func_call() { - A.clear_allocations(); - assert!(!A.has_allocated()); + A.reset_allocation_state(); + A.enable_recording_current(); + assert!(!A.has_allocated_current()); allocates(); - assert!(A.has_allocated()); + assert!(A.has_allocated_current()); +} + +#[test] +fn allocates_unrecorded() { + A.reset_allocation_state(); + assert!(!A.has_allocated_current()); + + allocates(); + assert!(!A.has_allocated_current()); } \ No newline at end of file diff --git a/tests/initial.rs b/tests/initial.rs index 896079a..68068ce 100644 --- a/tests/initial.rs +++ b/tests/initial.rs @@ -1,20 +1,18 @@ extern crate qadapt; use qadapt::QADAPT; -use std::sync::atomic::Ordering; #[global_allocator] static A: QADAPT = QADAPT::INIT; #[test] fn init() { - // Because the Allocator and its internals isn't the only "pre-main" allocation - // that happens, when starting up we expect to see that A has in fact allocated - assert!(A.has_allocated()); + assert!(!A.has_allocated_current()); + A.reset_allocation_state(); + A.enable_recording_current(); - A.clear_allocations(); - assert!(!A.has_allocated()); + assert!(!A.has_allocated_current()); let _x = Box::new(42); - assert!(A.has_allocated()); + assert!(A.has_allocated_current()); } \ No newline at end of file