Merge branch 'master' into misc_debugging
k-sareen authored Dec 1, 2024
2 parents 6d3b1e0 + cd2fe83 commit 655e5bb
Showing 23 changed files with 470 additions and 149 deletions.
7 changes: 7 additions & 0 deletions Cargo.toml
@@ -180,6 +180,13 @@ count_live_bytes_in_gc = []
# capture the type names of work packets.
bpftrace_workaround = []

# Disable mmap annotations.
# All invocations of `mmap` in mmtk-core are accompanied by calls to `prctl` with
# `PR_SET_VMA_ANON_NAME` to annotate the mapped ranges with human-readable names. Annotation is
# enabled by default and should work fine even with large heap sizes. However, if it causes
# problems, users can disable the annotations by enabling this Cargo feature.
no_mmap_annotation = []

# Do not modify the following line - ci-common.sh matches it
# -- Mutally exclusive features --
# Only one feature from each group can be provided. Otherwise build will fail.
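The feature comment above describes the annotation mechanism only in prose. The sketch below shows how an anonymous mapping can be labelled with `prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...)` on Linux (5.17+ with `CONFIG_ANON_VMA_NAME`) so that it appears as `[anon:<name>]` in `/proc/<pid>/maps`. The constants come from the kernel headers and the `libc` crate is assumed as a dependency; the helper name `mmap_annotated` and its error handling are illustrative, not mmtk-core's actual implementation.

```rust
use std::ffi::CString;
use std::io;

// Values from <linux/prctl.h>.
const PR_SET_VMA: libc::c_int = 0x53564d41;
const PR_SET_VMA_ANON_NAME: libc::c_ulong = 0;

/// Map `len` bytes of anonymous memory and label the range with `name`.
fn mmap_annotated(len: usize, name: &str) -> io::Result<*mut u8> {
    let addr = unsafe {
        libc::mmap(
            std::ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        )
    };
    if addr == libc::MAP_FAILED {
        return Err(io::Error::last_os_error());
    }

    // Annotate the range. This is best-effort: older kernels reject the
    // prctl, so a failure here is reported but not treated as fatal.
    let c_name = CString::new(name).expect("name must not contain NUL");
    let ret = unsafe {
        libc::prctl(
            PR_SET_VMA,
            PR_SET_VMA_ANON_NAME,
            addr as libc::c_ulong,
            len as libc::c_ulong,
            c_name.as_ptr(),
        )
    };
    if ret != 0 {
        eprintln!("mmap annotation ignored: {}", io::Error::last_os_error());
    }
    Ok(addr as *mut u8)
}

fn main() {
    let ptr = mmap_annotated(4096, "mmtk-example-space").expect("mmap failed");
    println!("mapped 4096 bytes at {ptr:p}");
}
```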
16 changes: 16 additions & 0 deletions docs/userguide/src/migration/prefix.md
@@ -30,6 +30,22 @@ Notes for the mmtk-core developers:

<!-- Insert new versions here -->

## 0.30.0

### mmap-related functions require annotation

```admonish tldr
Memory-mapping functions in `mmtk::util::memory` now take an additional `MmapAnnotation` argument.
```

API changes:

- module `util::memory`
+ The following functions take an additional `MmapAnnotation` argument.
* `dzmmap`
* `dzmmap_noreplace`
* `mmap_noreserve`

## 0.28.0

### `handle_user_collection_request` returns `bool`
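For VM bindings that call these functions directly, the migration is mechanical: add the annotation as a new trailing argument. The sketch below mirrors the call sites updated elsewhere in this commit; the helper `map_space_memory` and its parameter names are made up for illustration.

```rust
use mmtk::util::memory::{dzmmap_noreplace, MmapAnnotation, MmapStrategy};
use mmtk::util::Address;

// Hypothetical binding-side helper; only the `dzmmap_noreplace` call
// reflects the 0.30.0 API change described above.
fn map_space_memory(start: Address, bytes: usize, strategy: MmapStrategy, space_name: &str) {
    // 0.29.x: dzmmap_noreplace(start, bytes, strategy).unwrap();
    // 0.30.0: the same call, plus an `MmapAnnotation` naming the range.
    dzmmap_noreplace(
        start,
        bytes,
        strategy,
        &MmapAnnotation::Space { name: space_name },
    )
    .unwrap();
}
```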
2 changes: 1 addition & 1 deletion src/plan/global.rs
@@ -390,7 +390,7 @@ pub struct CreateSpecificPlanArgs<'a, VM: VMBinding> {
pub global_side_metadata_specs: Vec<SideMetadataSpec>,
}

impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> {
impl<VM: VMBinding> CreateSpecificPlanArgs<'_, VM> {
/// Get a PlanCreateSpaceArgs that can be used to create a space
pub fn get_space_args(
&mut self,
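The only change here (and in the two `ObjectsClosure` impls in `src/plan/tracing.rs` below) is lifetime elision: when an impl block never refers to a lifetime parameter by name, the anonymous lifetime `'_` can replace the declared parameter, which matches what newer clippy `needless_lifetimes` warnings suggest and does not change behaviour. A standalone illustration with a made-up type:

```rust
// Made-up type for illustration; `CreateSpecificPlanArgs` and
// `ObjectsClosure` follow the same pattern in the real diff.
struct Counter<'a> {
    slots: &'a mut Vec<usize>,
}

// Before: the lifetime is declared on the impl but never named in the body.
// impl<'a> Counter<'a> {
//     fn push(&mut self, v: usize) { self.slots.push(v); }
// }

// After: the anonymous lifetime `'_` stands in for the unused name.
impl Counter<'_> {
    fn push(&mut self, v: usize) {
        self.slots.push(v);
    }
}

fn main() {
    let mut v = Vec::new();
    let mut c = Counter { slots: &mut v };
    c.push(1);
    assert_eq!(v, vec![1]);
}
```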
2 changes: 0 additions & 2 deletions src/plan/mutator_context.rs
@@ -81,7 +81,6 @@ impl<VM: VMBinding> std::fmt::Debug for MutatorConfig<VM> {
/// A mutator is a per-thread data structure that manages allocations and barriers. It is usually highly coupled with the language VM.
/// It is recommended for MMTk users 1) to have a mutator struct of the same layout in the thread local storage that can be accessed efficiently,
/// and 2) to implement fastpath allocation and barriers for the mutator in the VM side.
// We are trying to make this struct fixed-sized so that VM bindings can easily define a type to have the exact same layout as this struct.
// Currently Mutator is fixed sized, and we should try keep this invariant:
// - Allocators are fixed-length arrays of allocators.
@@ -256,7 +255,6 @@

/// Each GC plan should provide their implementation of a MutatorContext. *Note that this trait is no longer needed as we removed
/// per-plan mutator implementation and we will remove this trait as well in the future.*
// TODO: We should be able to remove this trait, as we removed per-plan mutator implementation, and there is no other type that implements this trait.
// The Mutator struct above is the only type that implements this trait. We should be able to merge them.
pub trait MutatorContext<VM: VMBinding>: Send + 'static {
4 changes: 2 additions & 2 deletions src/plan/tracing.rs
@@ -111,7 +111,7 @@ impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> {
}
}

impl<'a, E: ProcessEdgesWork> SlotVisitor<SlotOf<E>> for ObjectsClosure<'a, E> {
impl<E: ProcessEdgesWork> SlotVisitor<SlotOf<E>> for ObjectsClosure<'_, E> {
fn visit_slot(&mut self, slot: SlotOf<E>) {
#[cfg(debug_assertions)]
{
@@ -129,7 +129,7 @@ impl<'a, E: ProcessEdgesWork> SlotVisitor<SlotOf<E>> for ObjectsClosure<'a, E> {
}
}

impl<'a, E: ProcessEdgesWork> Drop for ObjectsClosure<'a, E> {
impl<E: ProcessEdgesWork> Drop for ObjectsClosure<'_, E> {
fn drop(&mut self) {
self.flush();
}
24 changes: 16 additions & 8 deletions src/policy/lockfreeimmortalspace.rs
@@ -13,6 +13,7 @@ use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::PageResource;
use crate::util::heap::VMRequest;
use crate::util::memory::MmapAnnotation;
use crate::util::memory::MmapStrategy;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
@@ -241,15 +242,22 @@
*args.options.transparent_hugepages,
crate::util::memory::MmapProtection::ReadWrite,
);
crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap();
if space
crate::util::memory::dzmmap_noreplace(
start,
aligned_total_bytes,
strategy,
&MmapAnnotation::Space {
name: space.get_name(),
},
)
.unwrap();
space
.metadata
.try_map_metadata_space(start, aligned_total_bytes)
.is_err()
{
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory");
}
.try_map_metadata_space(start, aligned_total_bytes, space.get_name())
.unwrap_or_else(|e| {
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory: {e}")
});

space
}
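This hunk, together with the matching ones in `src/policy/sft_map.rs` and `src/policy/space.rs` further down, also changes how mapping failures are reported: instead of checking `is_err()` and panicking with a fixed string, the code now uses `unwrap_or_else` so the panic message carries the underlying error. A minimal self-contained illustration with a stand-in fallible function:

```rust
use std::io::{Error, ErrorKind};

// Stand-in for a fallible mapping call; always fails in this sketch.
fn try_map() -> std::io::Result<()> {
    Err(Error::new(ErrorKind::OutOfMemory, "cannot reserve metadata"))
}

fn main() {
    // Old pattern: the error value is discarded, so the panic only says
    // that *something* went wrong.
    // if try_map().is_err() {
    //     panic!("failed to mmap meta memory");
    // }

    // New pattern: the error is forwarded into the panic message.
    try_map().unwrap_or_else(|e| panic!("failed to mmap meta memory: {e}"));
}
```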
2 changes: 1 addition & 1 deletion src/policy/marksweepspace/malloc_ms/global.rs
@@ -439,7 +439,7 @@ impl<VM: VMBinding> MallocSpace<VM> {

fn map_metadata_and_update_bound(&self, addr: Address, size: usize) {
// Map the metadata space for the range [addr, addr + size)
map_meta_space(&self.metadata, addr, size);
map_meta_space(&self.metadata, addr, size, self.get_name());

// Update the bounds of the max and min chunk addresses seen -- this is used later in the sweep
// Lockless compare-and-swap loops perform better than a locking variant
21 changes: 13 additions & 8 deletions src/policy/marksweepspace/malloc_ms/metadata.rs
@@ -76,7 +76,7 @@ fn is_meta_space_mapped_for_address(address: Address) -> bool {
}

/// Eagerly map the active chunk metadata surrounding `chunk_start`
fn map_active_chunk_metadata(chunk_start: Address) {
fn map_active_chunk_metadata(chunk_start: Address, space_name: &str) {
debug_assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
// We eagerly map 16Gb worth of space for the chunk mark bytes on 64-bits
// We require saturating subtractions in order to not overflow the chunk_start by
@@ -99,16 +99,20 @@
chunk_start + (size / 2)
);

assert!(
CHUNK_METADATA.try_map_metadata_space(start, size).is_ok(),
"failed to mmap meta memory"
);
CHUNK_METADATA
.try_map_metadata_space(start, size, space_name)
.unwrap_or_else(|e| panic!("failed to mmap meta memory: {e}"));
}

/// We map the active chunk metadata (if not previously mapped), as well as the VO bit metadata
/// and active page metadata here. Note that if [addr, addr + size) crosses multiple chunks, we
/// will map for each chunk.
pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize) {
pub(super) fn map_meta_space(
metadata: &SideMetadataContext,
addr: Address,
size: usize,
space_name: &str,
) {
// In order to prevent race conditions, we synchronize on the lock first and then
// check if we need to map the active chunk metadata for `chunk_start`
let _lock = CHUNK_MAP_LOCK.lock().unwrap();
@@ -118,7 +122,7 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size
// Check if the chunk bit metadata is mapped. If it is not mapped, map it.
// Note that the chunk bit metadata is global. It may have been mapped because other policy mapped it.
if !is_chunk_mapped(start) {
map_active_chunk_metadata(start);
map_active_chunk_metadata(start, space_name);
}

// If we have set the chunk bit, return. This is needed just in case another thread has done this before
@@ -131,7 +135,8 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size
// Note that this might fail. For example, we have marked a chunk as active but later we freed all
// the objects in it, and unset its chunk bit. However, we do not free its metadata. So for the chunk,
// its chunk bit is mapped, but not marked, and all its local metadata is also mapped.
let mmap_metadata_result = metadata.try_map_metadata_space(start, BYTES_IN_CHUNK);
let mmap_metadata_result =
metadata.try_map_metadata_space(start, BYTES_IN_CHUNK, space_name);
debug_assert!(
mmap_metadata_result.is_ok(),
"mmap sidemetadata failed for chunk_start ({})",
8 changes: 5 additions & 3 deletions src/policy/sft_map.rs
@@ -378,9 +378,11 @@ mod dense_chunk_map {
global: vec![SFT_DENSE_CHUNK_MAP_INDEX],
local: vec![],
};
if context.try_map_metadata_space(start, bytes).is_err() {
panic!("failed to mmap metadata memory");
}
context
.try_map_metadata_space(start, bytes, "SFTDenseChunkMap")
.unwrap_or_else(|e| {
panic!("failed to mmap metadata memory: {e}");
});

self.update(space, start, bytes);
}
45 changes: 24 additions & 21 deletions src/policy/space.rs
@@ -144,12 +144,19 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
if let Err(mmap_error) = self
.common()
.mmapper
.ensure_mapped(res.start, res.pages, self.common().mmap_strategy())
.and(
self.common()
.metadata
.try_map_metadata_space(res.start, bytes),
.ensure_mapped(
res.start,
res.pages,
self.common().mmap_strategy(),
&memory::MmapAnnotation::Space {
name: self.get_name(),
},
)
.and(self.common().metadata.try_map_metadata_space(
res.start,
bytes,
self.get_name(),
))
{
memory::handle_mmap_error::<VM>(mmap_error, tls, res.start, bytes);
}
@@ -293,15 +300,13 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
/// Ensure this space is marked as mapped -- used when the space is already
/// mapped (e.g. for a vm image which is externally mmapped.)
fn ensure_mapped(&self) {
if self
.common()
self.common()
.metadata
.try_map_metadata_space(self.common().start, self.common().extent)
.is_err()
{
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory");
}
.try_map_metadata_space(self.common().start, self.common().extent, self.get_name())
.unwrap_or_else(|e| {
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory: {e}");
});

self.common()
.mmapper
@@ -609,14 +614,12 @@ impl<VM: VMBinding> CommonSpace<VM> {
}

// For contiguous space, we know its address range so we reserve metadata memory for its range.
if rtn
.metadata
.try_map_metadata_address_range(rtn.start, rtn.extent)
.is_err()
{
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory");
}
rtn.metadata
.try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
.unwrap_or_else(|e| {
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory: {e}");
});

debug!(
"Created space {} [{}, {}) for {} bytes",
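One detail of the first hunk in this file is worth keeping in mind when reading it: the two mapping steps are combined with `Result::and`, which takes an already-evaluated argument. Both `ensure_mapped` and `try_map_metadata_space` therefore always run, and if both fail it is the first error that reaches `handle_mmap_error`. A tiny sketch of that behaviour with dummy values:

```rust
fn main() {
    let first: Result<(), &str> = Err("ensure_mapped failed");
    let second: Result<(), &str> = Err("try_map_metadata_space failed");

    // `and` keeps the first error if there is one; otherwise it yields the
    // second result. Because `second` is an argument (not a closure), it is
    // computed regardless of how `first` turned out.
    assert_eq!(first.and(second), Err("ensure_mapped failed"));

    let ok: Result<(), &str> = Ok(());
    assert_eq!(
        ok.and(Err::<(), &str>("metadata failed")),
        Err("metadata failed")
    );
}
```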
2 changes: 1 addition & 1 deletion src/policy/vmspace.rs
@@ -229,7 +229,7 @@ impl<VM: VMBinding> VMSpace<VM> {
// Map side metadata
self.common
.metadata
.try_map_metadata_space(chunk_start, chunk_size)
.try_map_metadata_space(chunk_start, chunk_size, self.get_name())
.unwrap();
// Insert to vm map: it would be good if we can make VM map aware of the region. However, the region may be outside what we can map in our VM map implementation.
// self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);