diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs
index 783765529e..f39b051156 100644
--- a/src/policy/marksweepspace/native_ms/global.rs
+++ b/src/policy/marksweepspace/native_ms/global.rs
@@ -29,6 +29,7 @@ use crate::util::heap::chunk_map::*;
 use crate::util::linear_scan::Region;
 use crate::util::VMThread;
 use crate::vm::ObjectModel;
+use crate::vm::Scanning;
 use std::sync::Mutex;
 
 /// The result for `MarkSweepSpace.acquire_block()`. `MarkSweepSpace` will attempt
@@ -329,6 +330,58 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
         }
     }
 
+    /// Mark an object non-atomically. If multiple GC worker threads attempt to mark the same
+    /// object, more than one of them may return `true`.
+    fn attempt_mark_non_atomic(&self, object: ObjectReference) -> bool {
+        if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_marked::<VM>(object, Ordering::SeqCst) {
+            VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.mark::<VM>(object, Ordering::SeqCst);
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Mark an object atomically.
+    fn attempt_mark_atomic(&self, object: ObjectReference) -> bool {
+        let mark_state = 1u8;
+
+        loop {
+            let old_value = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.load_atomic::<VM, u8>(
+                object,
+                None,
+                Ordering::SeqCst,
+            );
+            if old_value == mark_state {
+                return false;
+            }
+
+            if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC
+                .compare_exchange_metadata::<VM, u8>(
+                    object,
+                    old_value,
+                    mark_state,
+                    None,
+                    Ordering::SeqCst,
+                    Ordering::SeqCst,
+                )
+                .is_ok()
+            {
+                break;
+            }
+        }
+        true
+    }
+
+    /// Mark an object. Return `true` if the object is newly marked. Return `false` if the object
+    /// was already marked.
+    fn attempt_mark(&self, object: ObjectReference) -> bool {
+        if VM::VMScanning::UNIQUE_OBJECT_ENQUEUING {
+            self.attempt_mark_atomic(object)
+        } else {
+            self.attempt_mark_non_atomic(object)
+        }
+    }
+
     fn trace_object<Q: ObjectQueue>(
         &self,
         queue: &mut Q,
@@ -339,8 +392,7 @@ impl<VM: VMBinding> MarkSweepSpace<VM> {
             "Cannot mark an object {} that was not alloced by free list allocator.",
             object,
         );
-        if !VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_marked::<VM>(object, Ordering::SeqCst) {
-            VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.mark::<VM>(object, Ordering::SeqCst);
+        if self.attempt_mark(object) {
             let block = Block::containing(object);
             block.set_state(BlockState::Marked);
             queue.enqueue(object);
diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs
index 05b6260f32..b02feb7fe1 100644
--- a/src/vm/scanning.rs
+++ b/src/vm/scanning.rs
@@ -140,6 +140,21 @@ pub trait RootsWorkFactory<SL: Slot>: Clone + Send + 'static {
 
 /// VM-specific methods for scanning roots/objects.
 pub trait Scanning<VM: VMBinding> {
+    /// When set to `true`, all plans will guarantee that during each GC, each live object is
+    /// enqueued at most once, and therefore scanned (by either [`Scanning::scan_object`] or
+    /// [`Scanning::scan_object_and_trace_edges`]) at most once.
+    ///
+    /// When set to `false`, MMTk may enqueue an object multiple times due to optimizations, such as
+    /// using non-atomic operations to mark objects. Consequently, an object may be scanned multiple
+    /// times during a GC.
+    ///
+    /// The default value is `false` because duplicated object-enqueuing is benign for most VMs, and
+    /// related optimizations, such as non-atomic marking, can improve GC speed. VM bindings can
+    /// override this constant if needed. For example, some VMs piggyback on object scanning to visit
+    /// objects during a GC, but may have data races if multiple GC workers visit the same object at
+    /// the same time. Such VMs can set this constant to `true` to work around this problem.
+    const UNIQUE_OBJECT_ENQUEUING: bool = false;
+
     /// Return true if the given object supports slot enqueuing.
     ///
     /// - If this returns true, MMTk core will call `scan_object` on the object.
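For context, a binding that needs the at-most-once scanning guarantee would opt in by overriding the new constant in its `Scanning` implementation. The snippet below is only a minimal sketch: `MyVM` and `MyScanning` are hypothetical names, and the trait's other required methods are elided.

```rust
use mmtk::vm::Scanning;

// `MyVM` is assumed to be this binding's `VMBinding` type.
struct MyScanning;

impl Scanning<MyVM> for MyScanning {
    // Request that each live object is enqueued (and thus scanned) at most once
    // per GC. With this set, MarkSweepSpace takes the atomic, CAS-based marking
    // path (`attempt_mark_atomic`) instead of the non-atomic one.
    const UNIQUE_OBJECT_ENQUEUING: bool = true;

    // ... the remaining required `Scanning` methods are unchanged ...
}
```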