diff --git a/Cargo.toml b/Cargo.toml
index f5c90ce936..94e44a445c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -172,9 +172,6 @@ work_packet_stats = []
 # Count the malloc'd memory into the heap size
 malloc_counted_size = []
 
-# Count the size of all live objects in GC
-count_live_bytes_in_gc = []
-
 # Workaround a problem where bpftrace scripts (see tools/tracing/timeline/capture.bt) cannot
 # capture the type names of work packets.
 bpftrace_workaround = []
diff --git a/docs/userguide/src/migration/prefix.md b/docs/userguide/src/migration/prefix.md
index 451fd5657c..54d2e7d745 100644
--- a/docs/userguide/src/migration/prefix.md
+++ b/docs/userguide/src/migration/prefix.md
@@ -32,6 +32,28 @@ Notes for the mmtk-core developers:
 
 ## 0.30.0
 
+### `live_bytes_in_last_gc` becomes a runtime option, and returns a map of live bytes for each space
+
+```admonish tldr
+`count_live_bytes_in_gc` is now a runtime option instead of a (build-time) feature, and we collect
+live bytes statistics per space. Correspondingly, `memory_manager::live_bytes_in_last_gc` now returns a map of
+live bytes for each space.
+```
+
+API changes:
+
+- module `util::options`
+  + `Options` includes `count_live_bytes_in_gc`, which defaults to `false`. This can be turned on at run time.
+  + The old `count_live_bytes_in_gc` feature is removed.
+- module `memory_manager`
+  + `live_bytes_in_last_gc` now returns a `HashMap<&'static str, LiveBytesStats>`. The keys are
+    strings for space names, and the values are statistics for live bytes in the space.
+
+See also:
+
+- PR: 
+
 ### mmap-related functions require annotation
 
 ```admonish tldr
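For bindings that previously enabled the Cargo feature, the switch is a one-line change at MMTk initialization. The sketch below assumes the usual `MMTKBuilder`/`mmtk_init` flow and that `set_option` takes the option name and value as strings; treat it as an illustration rather than binding-specific guidance.

```rust
use mmtk::vm::VMBinding;
use mmtk::{memory_manager, MMTKBuilder, MMTK};

fn build_mmtk<VM: VMBinding>() -> Box<MMTK<VM>> {
    let mut builder = MMTKBuilder::new();
    // Previously: `count_live_bytes_in_gc` was a Cargo feature of mmtk-core.
    // Now: flip the runtime option instead (it defaults to `false`).
    builder.set_option("count_live_bytes_in_gc", "true");
    memory_manager::mmtk_init::<VM>(&builder)
}
```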
diff --git a/src/global_state.rs b/src/global_state.rs
index f355e697f2..a1f42d4526 100644
--- a/src/global_state.rs
+++ b/src/global_state.rs
@@ -1,9 +1,9 @@
+use atomic_refcell::AtomicRefCell;
+use std::collections::HashMap;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::Mutex;
 use std::time::Instant;
 
-use atomic_refcell::AtomicRefCell;
-
 /// This stores some global states for an MMTK instance.
 /// Some MMTK components like plans and allocators may keep an reference to the struct, and can access it.
 // This used to be a part of the `BasePlan`. In that case, any component that accesses
@@ -45,9 +45,8 @@ pub struct GlobalState {
     /// A counteer that keeps tracks of the number of bytes allocated by malloc
     #[cfg(feature = "malloc_counted_size")]
     pub(crate) malloc_bytes: AtomicUsize,
-    /// This stores the size in bytes for all the live objects in last GC. This counter is only updated in the GC release phase.
-    #[cfg(feature = "count_live_bytes_in_gc")]
-    pub(crate) live_bytes_in_last_gc: AtomicUsize,
+    /// This stores the live bytes and the used bytes (by pages) for each space in the last GC. This is only updated in the GC release phase.
+    pub(crate) live_bytes_in_last_gc: AtomicRefCell<HashMap<&'static str, LiveBytesStats>>,
 }
 
 impl GlobalState {
@@ -183,16 +182,6 @@ impl GlobalState {
     pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) {
         self.malloc_bytes.fetch_sub(size, Ordering::SeqCst);
     }
-
-    #[cfg(feature = "count_live_bytes_in_gc")]
-    pub fn get_live_bytes_in_last_gc(&self) -> usize {
-        self.live_bytes_in_last_gc.load(Ordering::SeqCst)
-    }
-
-    #[cfg(feature = "count_live_bytes_in_gc")]
-    pub fn set_live_bytes_in_last_gc(&self, size: usize) {
-        self.live_bytes_in_last_gc.store(size, Ordering::SeqCst);
-    }
 }
 
 impl Default for GlobalState {
@@ -213,8 +202,7 @@ impl Default for GlobalState {
             allocation_bytes: AtomicUsize::new(0),
             #[cfg(feature = "malloc_counted_size")]
             malloc_bytes: AtomicUsize::new(0),
-            #[cfg(feature = "count_live_bytes_in_gc")]
-            live_bytes_in_last_gc: AtomicUsize::new(0),
+            live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()),
         }
     }
 }
@@ -225,3 +213,15 @@ pub enum GcStatus {
     GcPrepare,
     GcProper,
 }
+
+/// Statistics for the live bytes in the last GC. The statistics are per space.
+#[derive(Copy, Clone, Debug)]
+pub struct LiveBytesStats {
+    /// Total accumulated bytes of live objects in the space.
+    pub live_bytes: usize,
+    /// Total pages used by the space.
+    pub used_pages: usize,
+    /// Total bytes used by the space, computed from `used_pages`.
+    /// The ratio of `live_bytes` to `used_bytes` reflects how well the memory in the space is utilized.
+    pub used_bytes: usize,
+}
diff --git a/src/lib.rs b/src/lib.rs
index 44bd5a123c..73a005d9f7 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -46,6 +46,7 @@ pub(crate) use mmtk::MMAPPER;
 pub use mmtk::MMTK;
 
 mod global_state;
+pub use crate::global_state::LiveBytesStats;
 
 mod policy;
 
diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 1949eca8eb..54df61cce4 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -26,6 +26,8 @@ use crate::vm::slot::MemorySlice;
 use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
 
+use std::collections::HashMap;
+
 /// Initialize an MMTk instance. A VM should call this method after creating an [`crate::MMTK`]
 /// instance but before using any of the methods provided in MMTk (except `process()` and `process_bulk()`).
 ///
@@ -531,16 +533,18 @@ pub fn free_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
     mmtk.get_plan().get_free_pages() << LOG_BYTES_IN_PAGE
 }
 
-/// Return the size of all the live objects in bytes in the last GC. MMTk usually accounts for memory in pages.
+/// Return a hash map of live bytes statistics for each space in the last GC.
+///
+/// MMTk usually accounts for memory in pages for each space.
 /// This is a special method that we count the size of every live object in a GC, and sum up the total bytes.
-/// We provide this method so users can compare with `used_bytes` (which does page accounting), and know if
-/// the heap is fragmented.
+/// We provide this method so users can use [`crate::LiveBytesStats`] to know if
+/// the space is fragmented.
 /// The value returned by this method is only updated when we finish tracing in a GC. A recommended timing
 /// to call this method is at the end of a GC (e.g. when the runtime is about to resume threads).
-#[cfg(feature = "count_live_bytes_in_gc")]
-pub fn live_bytes_in_last_gc<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
-    use std::sync::atomic::Ordering;
-    mmtk.state.live_bytes_in_last_gc.load(Ordering::SeqCst)
+pub fn live_bytes_in_last_gc<VM: VMBinding>(
+    mmtk: &MMTK<VM>,
+) -> HashMap<&'static str, crate::LiveBytesStats> {
+    mmtk.state.live_bytes_in_last_gc.borrow().clone()
 }
 
 /// Return the starting address of the heap. *Note that currently MMTk uses
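The new `memory_manager::live_bytes_in_last_gc` API above is easiest to see with a small consumer. The sketch below is illustrative only (the reporting function and where it is called are up to the binding); it assumes the recommended timing from the doc comment, i.e. it runs at the end of a GC before mutators are resumed, and that `count_live_bytes_in_gc` has been enabled.

```rust
use mmtk::vm::VMBinding;
use mmtk::{memory_manager, MMTK};

/// Print per-space live-bytes statistics for the last GC.
/// Entries are only present for spaces with a non-zero page count,
/// so the division below cannot be by zero.
fn report_live_bytes<VM: VMBinding>(mmtk: &MMTK<VM>) {
    for (space, stats) in memory_manager::live_bytes_in_last_gc(mmtk) {
        let percent_live = stats.live_bytes as f64 * 100.0 / stats.used_bytes as f64;
        println!(
            "{}: {} bytes live of {} bytes ({} pages), {:.1}% live",
            space, stats.live_bytes, stats.used_bytes, stats.used_pages, percent_live
        );
    }
}
```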
diff --git a/src/mmtk.rs b/src/mmtk.rs
index c498ca901f..24aa57d6c7 100644
--- a/src/mmtk.rs
+++ b/src/mmtk.rs
@@ -12,6 +12,7 @@ use crate::util::address::ObjectReference;
 use crate::util::analysis::AnalysisManager;
 use crate::util::finalizable_processor::FinalizableProcessor;
 use crate::util::heap::gc_trigger::GCTrigger;
+use crate::util::heap::layout::heap_parameters::MAX_SPACES;
 use crate::util::heap::layout::vm_layout::VMLayout;
 use crate::util::heap::layout::{self, Mmapper, VMMap};
 use crate::util::heap::HeapMeta;
@@ -26,6 +27,7 @@ use crate::util::statistics::stats::Stats;
 use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
 use std::cell::UnsafeCell;
+use std::collections::HashMap;
 use std::default::Default;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
@@ -527,6 +529,36 @@ impl<VM: VMBinding> MMTK<VM> {
         })
     }
 
+    /// Aggregate the live bytes counts per space (an array indexed by the space descriptor index)
+    /// with the space stats to produce a map of live bytes stats for the spaces.
+    pub(crate) fn aggregate_live_bytes_in_last_gc(
+        &self,
+        live_bytes_per_space: [usize; MAX_SPACES],
+    ) -> HashMap<&'static str, crate::LiveBytesStats> {
+        use crate::policy::space::Space;
+        let mut ret = HashMap::new();
+        self.get_plan().for_each_space(&mut |space: &dyn Space<VM>| {
+            let space_name = space.get_name();
+            let space_idx = space.get_descriptor().get_index();
+            let used_pages = space.reserved_pages();
+            if used_pages != 0 {
+                let used_bytes = crate::util::conversions::pages_to_bytes(used_pages);
+                let live_bytes = live_bytes_per_space[space_idx];
+                debug_assert!(
+                    live_bytes <= used_bytes,
+                    "Live bytes of objects in {} ({} bytes) is larger than used pages ({} bytes), something is wrong.",
+                    space_name, live_bytes, used_bytes
+                );
+                ret.insert(space_name, crate::LiveBytesStats {
+                    live_bytes,
+                    used_pages,
+                    used_bytes,
+                });
+            }
+        });
+        ret
+    }
+
     /// Print VM maps. It will print the memory ranges used by spaces as well as some attributes of
     /// the spaces.
     ///
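A quick sanity check on the numbers this aggregation produces, assuming MMTk's usual 4 KiB pages (`LOG_BYTES_IN_PAGE = 12`); the figures are made up for illustration:

```rust
fn main() {
    const LOG_BYTES_IN_PAGE: usize = 12; // 4 KiB pages (assumed)
    let used_pages = 256usize;                        // what `Space::reserved_pages()` reported
    let used_bytes = used_pages << LOG_BYTES_IN_PAGE; // 256 * 4096 bytes = 1 MiB
    let live_bytes = 600 * 1024usize;                 // what the workers accumulated for this space
    assert_eq!(used_bytes, 1 << 20);
    // live_bytes / used_bytes is the utilization of the space's committed pages:
    // here roughly 58.6%, with the remainder being fragmentation or dead objects.
    println!("{:.1}% live", live_bytes as f64 * 100.0 / used_bytes as f64);
}
```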
diff --git a/src/plan/markcompact/gc_work.rs b/src/plan/markcompact/gc_work.rs
index c89be04793..8197eb4e65 100644
--- a/src/plan/markcompact/gc_work.rs
+++ b/src/plan/markcompact/gc_work.rs
@@ -51,7 +51,6 @@ impl<VM: VMBinding> GCWork<VM> for UpdateReferences<VM> {
         mmtk.slot_logger.reset();
 
         // We do two passes of transitive closures. We clear the live bytes from the first pass.
-        #[cfg(feature = "count_live_bytes_in_gc")]
         mmtk.scheduler
             .worker_group
             .get_and_clear_worker_live_bytes();
diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index 6b30f80117..ddf8e5bb04 100644
--- a/src/policy/copyspace.rs
+++ b/src/policy/copyspace.rs
@@ -25,7 +25,7 @@ pub struct CopySpace<VM: VMBinding> {
 }
 
 impl<VM: VMBinding> SFT for CopySpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.get_name()
     }
 
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index 6ebd9c255e..331600bc91 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -86,7 +86,7 @@ pub struct ImmixSpaceArgs {
 unsafe impl<VM: VMBinding> Sync for ImmixSpace<VM> {}
 
 impl<VM: VMBinding> SFT for ImmixSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.get_name()
     }
 
diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs
index e4f49f7c28..cc4dd75bfc 100644
--- a/src/policy/immortalspace.rs
+++ b/src/policy/immortalspace.rs
@@ -27,7 +27,7 @@ pub struct ImmortalSpace<VM: VMBinding> {
 }
 
 impl<VM: VMBinding> SFT for ImmortalSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.get_name()
     }
     fn is_live(&self, _object: ObjectReference) -> bool {
diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index 8906a50044..cdeb87a9c0 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -32,7 +32,7 @@ pub struct LargeObjectSpace<VM: VMBinding> {
 }
 
 impl<VM: VMBinding> SFT for LargeObjectSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.get_name()
     }
     fn is_live(&self, object: ObjectReference) -> bool {
diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs
index 6c6b842425..3863934414 100644
--- a/src/policy/lockfreeimmortalspace.rs
+++ b/src/policy/lockfreeimmortalspace.rs
@@ -46,7 +46,7 @@ pub struct LockFreeImmortalSpace<VM: VMBinding> {
 }
 
 impl<VM: VMBinding> SFT for LockFreeImmortalSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.get_name()
     }
     fn is_live(&self, _object: ObjectReference) -> bool {
diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs
index 7843edef9a..d7ca0c0745 100644
--- a/src/policy/markcompactspace.rs
+++ b/src/policy/markcompactspace.rs
@@ -33,7 +33,7 @@ pub const GC_EXTRA_HEADER_WORD: usize = 1;
 const GC_EXTRA_HEADER_BYTES: usize = GC_EXTRA_HEADER_WORD << LOG_BYTES_IN_WORD;
 
 impl<VM: VMBinding> SFT for MarkCompactSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.get_name()
     }
 
diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs
index 9424a5069a..dd1f91042c 100644
--- a/src/policy/marksweepspace/malloc_ms/global.rs
+++ b/src/policy/marksweepspace/malloc_ms/global.rs
@@ -64,7 +64,7 @@ pub struct MallocSpace<VM: VMBinding> {
 }
 
 impl<VM: VMBinding> SFT for MallocSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.get_name()
     }
 
diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index eaecbe3741..783765529e 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -157,7 +157,7 @@ impl AbandonedBlockLists {
 }
 
 impl<VM: VMBinding> SFT for MarkSweepSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.common.name
     }
 
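All of the `SFT::name` implementations above change their return type from `&str` to `&'static str`, presumably so that space names can be used as keys of the new `HashMap<&'static str, LiveBytesStats>`, which outlives any particular borrow of a space. A minimal, self-contained sketch of the constraint that key type imposes (with a local stand-in for `LiveBytesStats`):

```rust
use std::collections::HashMap;

// Local stand-in for `mmtk::LiveBytesStats`, just for illustration.
#[derive(Copy, Clone, Debug)]
struct LiveBytesStats {
    live_bytes: usize,
    used_pages: usize,
    used_bytes: usize,
}

// The key type forces `'static` names: a `&str` tied to a shorter-lived space
// borrow would not be accepted here.
fn record(map: &mut HashMap<&'static str, LiveBytesStats>, name: &'static str, stats: LiveBytesStats) {
    map.insert(name, stats);
}

fn main() {
    let mut map = HashMap::new();
    record(&mut map, "immix", LiveBytesStats { live_bytes: 0, used_pages: 0, used_bytes: 0 });
    println!("{:?}", map);
}
```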
diff --git a/src/policy/sft.rs b/src/policy/sft.rs
index cab4cc987a..c443b48ad3 100644
--- a/src/policy/sft.rs
+++ b/src/policy/sft.rs
@@ -24,7 +24,7 @@ use std::marker::PhantomData;
 /// table of SFT rather than Space.
 pub trait SFT {
     /// The space name
-    fn name(&self) -> &str;
+    fn name(&self) -> &'static str;
 
     /// Get forwarding pointer if the object is forwarded.
     fn get_forwarded_object(&self, _object: ObjectReference) -> Option<ObjectReference> {
@@ -120,7 +120,7 @@ pub const EMPTY_SFT_NAME: &str = "empty";
 pub const EMPTY_SPACE_SFT: EmptySpaceSFT = EmptySpaceSFT {};
 
 impl SFT for EmptySpaceSFT {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         EMPTY_SFT_NAME
     }
     fn is_live(&self, object: ObjectReference) -> bool {
diff --git a/src/policy/space.rs b/src/policy/space.rs
index d1dd36cede..7057638dde 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -328,6 +328,10 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         self.common().name
     }
 
+    fn get_descriptor(&self) -> SpaceDescriptor {
+        self.common().descriptor
+    }
+
     fn common(&self) -> &CommonSpace<VM>;
 
     fn get_gc_trigger(&self) -> &GCTrigger<VM> {
         self.common().gc_trigger.as_ref()
diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs
index 770c362cfd..c19e3a516b 100644
--- a/src/policy/vmspace.rs
+++ b/src/policy/vmspace.rs
@@ -29,7 +29,7 @@ pub struct VMSpace<VM: VMBinding> {
 }
 
 impl<VM: VMBinding> SFT for VMSpace<VM> {
-    fn name(&self) -> &str {
+    fn name(&self) -> &'static str {
         self.common.name
     }
     fn is_live(&self, _object: ObjectReference) -> bool {
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index 82be1f3561..ceae37d514 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -154,14 +154,12 @@ impl<C: GCWorkContext> GCWork<C::VM> for Release<C> {
             debug_assert!(result.is_ok());
         }
 
-        #[cfg(feature = "count_live_bytes_in_gc")]
-        {
-            let live_bytes = mmtk
-                .scheduler
-                .worker_group
-                .get_and_clear_worker_live_bytes();
-            mmtk.state.set_live_bytes_in_last_gc(live_bytes);
-        }
+        let live_bytes = mmtk
+            .scheduler
+            .worker_group
+            .get_and_clear_worker_live_bytes();
+        *mmtk.state.live_bytes_in_last_gc.borrow_mut() =
+            mmtk.aggregate_live_bytes_in_last_gc(live_bytes);
     }
 }
 
@@ -820,7 +818,7 @@ pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
         &self,
         buffer: &[ObjectReference],
         worker: &mut GCWorker<<Self::E as ProcessEdgesWork>::VM>,
-        _mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
+        mmtk: &'static MMTK<<Self::E as ProcessEdgesWork>::VM>,
     ) {
         let tls = worker.tls;
 
@@ -830,14 +828,21 @@ pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
         let mut scan_later = vec![];
         {
             let mut closure = ObjectsClosure::<Self::E>::new(worker, self.get_bucket());
-            for object in objects_to_scan.iter().copied() {
-                // For any object we need to scan, we count its liv bytes
-                #[cfg(feature = "count_live_bytes_in_gc")]
-                closure
-                    .worker
-                    .shared
-                    .increase_live_bytes(VM::VMObjectModel::get_current_size(object));
+            // For any object we need to scan, we count its live bytes.
+            // Check the option outside the loop for better performance.
+            if crate::util::rust_util::unlikely(*mmtk.get_options().count_live_bytes_in_gc) {
+                // Borrow before the loop.
+                let mut live_bytes_stats = closure.worker.shared.live_bytes_per_space.borrow_mut();
+                for object in objects_to_scan.iter().copied() {
+                    crate::scheduler::worker::GCWorkerShared::<VM>::increase_live_bytes(
+                        &mut live_bytes_stats,
+                        object,
+                    );
+                }
+            }
+
+            for object in objects_to_scan.iter().copied() {
                 if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                     trace!("Scan object (slot) {}", object);
                     // If an object supports slot-enqueuing, we enqueue its slots.
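The `ScanObjectsWork` change above is careful to test the new option only once per work packet (wrapped in an `unlikely` hint) and to take the `AtomicRefCell` borrow once before iterating the buffer, so the common case where the option is off pays almost nothing per object. A stripped-down sketch of that shape, with stand-in types rather than the real mmtk-core ones:

```rust
use atomic_refcell::AtomicRefCell;

const MAX_SPACES: usize = 16; // stand-in value

struct WorkerShared {
    live_bytes_per_space: AtomicRefCell<[usize; MAX_SPACES]>,
}

/// `objects` stands in for the scan buffer: (space index, object size) pairs.
fn scan_packet(shared: &WorkerShared, objects: &[(usize, usize)], count_live_bytes: bool) {
    // Branch and borrow once, outside the per-object loop.
    if count_live_bytes {
        let mut per_space = shared.live_bytes_per_space.borrow_mut();
        for &(space_index, size) in objects {
            per_space[space_index] += size;
        }
    }
    for &(_space_index, _size) in objects {
        // ... the actual object scanning would happen here ...
    }
}
```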
diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs
index 30143dc049..272f8be0ed 100644
--- a/src/scheduler/scheduler.rs
+++ b/src/scheduler/scheduler.rs
@@ -553,22 +553,15 @@ impl<VM: VMBinding> GCWorkScheduler<VM> {
         // USDT tracepoint for the end of GC.
         probe!(mmtk, gc_end);
 
-        #[cfg(feature = "count_live_bytes_in_gc")]
-        {
-            let live_bytes = mmtk.state.get_live_bytes_in_last_gc();
-            let used_bytes =
-                mmtk.get_plan().get_used_pages() << crate::util::constants::LOG_BYTES_IN_PAGE;
-            debug_assert!(
-                live_bytes <= used_bytes,
-                "Live bytes of all live objects ({} bytes) is larger than used pages ({} bytes), something is wrong.",
-                live_bytes, used_bytes
-            );
-            info!(
-                "Live objects = {} bytes ({:04.1}% of {} used pages)",
-                live_bytes,
-                live_bytes as f64 * 100.0 / used_bytes as f64,
-                mmtk.get_plan().get_used_pages()
-            );
+        if *mmtk.get_options().count_live_bytes_in_gc {
+            for (space_name, &stats) in mmtk.state.live_bytes_in_last_gc.borrow().iter() {
+                info!(
+                    "{} = {} pages ({:.1}% live)",
+                    space_name,
+                    stats.used_pages,
+                    stats.live_bytes as f64 * 100.0 / stats.used_bytes as f64,
+                );
+            }
         }
 
         #[cfg(feature = "extreme_assertions")]
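With the option enabled, the end-of-GC summary above becomes one log line per space rather than a single heap-wide figure. The snippet below just replays the format string with made-up numbers to show the shape of the output:

```rust
fn main() {
    // Hypothetical figures: 2048 used pages (4 KiB each) and ~4.5 MB of live objects.
    let (space_name, used_pages, live_bytes, used_bytes) = ("immix", 2048usize, 4_500_000usize, 8_388_608usize);
    let line = format!(
        "{} = {} pages ({:.1}% live)",
        space_name,
        used_pages,
        live_bytes as f64 * 100.0 / used_bytes as f64,
    );
    assert_eq!(line, "immix = 2048 pages (53.6% live)");
    println!("{line}");
}
```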
diff --git a/src/scheduler/worker.rs b/src/scheduler/worker.rs
index babfd32f29..cc1b28e23b 100644
--- a/src/scheduler/worker.rs
+++ b/src/scheduler/worker.rs
@@ -3,14 +3,14 @@ use super::work_bucket::*;
 use super::*;
 use crate::mmtk::MMTK;
 use crate::util::copy::GCWorkerCopyContext;
+use crate::util::heap::layout::heap_parameters::MAX_SPACES;
 use crate::util::opaque_pointer::*;
+use crate::util::ObjectReference;
 use crate::vm::{Collection, GCThreadContext, VMBinding};
 use atomic::Atomic;
 use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
 use crossbeam::deque::{self, Stealer};
 use crossbeam::queue::ArrayQueue;
-#[cfg(feature = "count_live_bytes_in_gc")]
-use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
 use std::sync::{Arc, Mutex};
 
@@ -42,8 +42,8 @@ pub struct GCWorkerShared<VM: VMBinding> {
     /// Accumulated bytes for live objects in this GC. When each worker scans
     /// objects, we increase the live bytes. We get this value from each worker
     /// at the end of a GC, and reset this counter.
-    #[cfg(feature = "count_live_bytes_in_gc")]
-    live_bytes: AtomicUsize,
+    /// The live bytes are stored in an array indexed by the space's descriptor index.
+    pub live_bytes_per_space: AtomicRefCell<[usize; MAX_SPACES]>,
     /// A queue of GCWork that can only be processed by the owned thread.
     pub designated_work: ArrayQueue<Box<dyn GCWork<VM>>>,
     /// Handle for stealing packets from the current worker
@@ -54,21 +54,32 @@ impl<VM: VMBinding> GCWorkerShared<VM> {
     pub fn new(stealer: Option<Stealer<Box<dyn GCWork<VM>>>>) -> Self {
         Self {
             stat: Default::default(),
-            #[cfg(feature = "count_live_bytes_in_gc")]
-            live_bytes: AtomicUsize::new(0),
+            live_bytes_per_space: AtomicRefCell::new([0; MAX_SPACES]),
             designated_work: ArrayQueue::new(16),
             stealer,
         }
     }
 
-    #[cfg(feature = "count_live_bytes_in_gc")]
-    pub(crate) fn increase_live_bytes(&self, bytes: usize) {
-        self.live_bytes.fetch_add(bytes, Ordering::Relaxed);
-    }
-
-    #[cfg(feature = "count_live_bytes_in_gc")]
-    pub(crate) fn get_and_clear_live_bytes(&self) -> usize {
-        self.live_bytes.swap(0, Ordering::SeqCst)
+    pub(crate) fn increase_live_bytes(
+        live_bytes_per_space: &mut [usize; MAX_SPACES],
+        object: ObjectReference,
+    ) {
+        use crate::mmtk::VM_MAP;
+        use crate::vm::object_model::ObjectModel;
+
+        // The live bytes of the object
+        let bytes = VM::VMObjectModel::get_current_size(object);
+        // Get the space index from the descriptor
+        let space_descriptor = VM_MAP.get_descriptor_for_address(object.to_raw_address());
+        let space_index = space_descriptor.get_index();
+        debug_assert!(
+            space_index < MAX_SPACES,
+            "Space index {} is not in the range of [0, {})",
+            space_index,
+            MAX_SPACES
+        );
+        // Accumulate the live bytes for the index
+        live_bytes_per_space[space_index] += bytes;
     }
 }
 
@@ -429,11 +440,16 @@ impl<VM: VMBinding> WorkerGroup<VM> {
             .any(|w| !w.designated_work.is_empty())
     }
 
-    #[cfg(feature = "count_live_bytes_in_gc")]
-    pub fn get_and_clear_worker_live_bytes(&self) -> usize {
-        self.workers_shared
-            .iter()
-            .map(|w| w.get_and_clear_live_bytes())
-            .sum()
+    /// Get the live bytes data from all workers, and clear their local counts.
+    pub fn get_and_clear_worker_live_bytes(&self) -> [usize; MAX_SPACES] {
+        let mut ret = [0; MAX_SPACES];
+        self.workers_shared.iter().for_each(|w| {
+            let mut live_bytes_per_space = w.live_bytes_per_space.borrow_mut();
+            for (idx, val) in live_bytes_per_space.iter_mut().enumerate() {
+                ret[idx] += *val;
+                *val = 0;
+            }
+        });
+        ret
     }
 }
diff --git a/src/util/options.rs b/src/util/options.rs
index 49eab795e0..2a0d5025b6 100644
--- a/src/util/options.rs
+++ b/src/util/options.rs
@@ -864,7 +864,9 @@ options! {
     gc_trigger: GCTriggerSelector [env_var: true, command_line: true] [|v: &GCTriggerSelector| v.validate()] = GCTriggerSelector::FixedHeapSize((crate::util::memory::get_system_total_memory() as f64 * 0.5f64) as usize),
     /// Enable transparent hugepage support for MMTk spaces via madvise (only Linux is supported)
     /// This only affects the memory for MMTk spaces.
-    transparent_hugepages: bool [env_var: true, command_line: true] [|v: &bool| !v || cfg!(target_os = "linux")] = false
+    transparent_hugepages: bool [env_var: true, command_line: true] [|v: &bool| !v || cfg!(target_os = "linux")] = false,
+    /// Count live bytes for objects in each space during a GC.
+    count_live_bytes_in_gc: bool [env_var: true, command_line: true] [always_valid] = false
 }
 
 #[cfg(test)]
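Since the new option is declared with `env_var: true` and `command_line: true`, it can also be switched on without touching binding code. The environment variable spelling below assumes MMTk's usual convention of `MMTK_`-prefixed, upper-cased option names; treat the whole snippet as a hypothetical launcher-side sketch rather than documented behaviour:

```rust
use std::process::Command;

/// Launch a VM binary with per-space live-bytes statistics enabled via the environment.
fn run_vm_with_live_bytes_stats(vm_path: &str) -> std::io::Result<std::process::ExitStatus> {
    Command::new(vm_path)
        // Assumed spelling, following MMTk's `MMTK_<OPTION_NAME>` convention.
        .env("MMTK_COUNT_LIVE_BYTES_IN_GC", "true")
        .status()
}
```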