From 5bc6ce561a5c6e0e186fb7254f6b4a0c28be7dae Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 29 Nov 2024 13:16:22 +0800 Subject: [PATCH 1/2] Fix clippy warnings for Rust 1.83 (#1242) Clippy 1.83 produces some new warnings: - `needless_lifetimes` is extended to suggest eliding `impl` lifetimes. - `empty_line_after_doc_comments` is added to the `suspicious` group. --- src/plan/global.rs | 2 +- src/plan/mutator_context.rs | 2 -- src/plan/tracing.rs | 4 ++-- src/util/test_util/mock_vm.rs | 1 - 4 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/plan/global.rs b/src/plan/global.rs index 14695c2d00..6e0770e741 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -390,7 +390,7 @@ pub struct CreateSpecificPlanArgs<'a, VM: VMBinding> { pub global_side_metadata_specs: Vec, } -impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { +impl CreateSpecificPlanArgs<'_, VM> { /// Get a PlanCreateSpaceArgs that can be used to create a space pub fn get_space_args( &mut self, diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 8bf266f430..4ed41190cd 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -81,7 +81,6 @@ impl std::fmt::Debug for MutatorConfig { /// A mutator is a per-thread data structure that manages allocations and barriers. It is usually highly coupled with the language VM. /// It is recommended for MMTk users 1) to have a mutator struct of the same layout in the thread local storage that can be accessed efficiently, /// and 2) to implement fastpath allocation and barriers for the mutator in the VM side. - // We are trying to make this struct fixed-sized so that VM bindings can easily define a type to have the exact same layout as this struct. // Currently Mutator is fixed sized, and we should try keep this invariant: // - Allocators are fixed-length arrays of allocators. @@ -256,7 +255,6 @@ impl Mutator { /// Each GC plan should provide their implementation of a MutatorContext. *Note that this trait is no longer needed as we removed /// per-plan mutator implementation and we will remove this trait as well in the future.* - // TODO: We should be able to remove this trait, as we removed per-plan mutator implementation, and there is no other type that implements this trait. // The Mutator struct above is the only type that implements this trait. We should be able to merge them. pub trait MutatorContext: Send + 'static { diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 2f9ac4c3c9..739caa657a 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -111,7 +111,7 @@ impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> { } } -impl<'a, E: ProcessEdgesWork> SlotVisitor> for ObjectsClosure<'a, E> { +impl SlotVisitor> for ObjectsClosure<'_, E> { fn visit_slot(&mut self, slot: SlotOf) { #[cfg(debug_assertions)] { @@ -129,7 +129,7 @@ impl<'a, E: ProcessEdgesWork> SlotVisitor> for ObjectsClosure<'a, E> { } } -impl<'a, E: ProcessEdgesWork> Drop for ObjectsClosure<'a, E> { +impl Drop for ObjectsClosure<'_, E> { fn drop(&mut self) { self.flush(); } diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs index a0d9b5f868..cc687f4b1f 100644 --- a/src/util/test_util/mock_vm.rs +++ b/src/util/test_util/mock_vm.rs @@ -180,7 +180,6 @@ pub fn no_cleanup() {} /// /// These are not supported at the moment. As those will change the `MockVM` type, we will have /// to use macros to generate a new `MockVM` type when we custimize constants or associated types. 
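For readers unfamiliar with the `needless_lifetimes` behaviour new in Clippy 1.83 that the commit message above refers to, here is a minimal, self-contained sketch of the elision it suggests. The `Wrapper` type is hypothetical and not an mmtk-core type; it only illustrates the same pattern applied to `CreateSpecificPlanArgs`, `ObjectsClosure` and the `Drop` impl in the hunks above.

```rust
struct Wrapper<'a, T> {
    inner: &'a T,
}

// Clippy 1.83 flags `impl<'a, T> Wrapper<'a, T>` when the named lifetime is only
// used in the self type, and suggests eliding it with `'_` in the impl header.
impl<T> Wrapper<'_, T> {
    fn get(&self) -> &T {
        self.inner
    }
}

fn main() {
    let value = 42;
    let wrapper = Wrapper { inner: &value };
    assert_eq!(*wrapper.get(), 42);
}
```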
- // The current implementation is not perfect, but at least it works, and it is easy enough to debug with. // I have tried different third-party libraries for mocking, and each has its own limitation. And // none of the libraries I tried can mock `VMBinding` and the associated traits out of box. Even after I attempted From cd2fe83dc84a264da17df81875517b7406ec7617 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 29 Nov 2024 17:17:14 +0800 Subject: [PATCH 2/2] Annotate mmap ranges using PR_SET_VMA (#1236) We demand that every invocation of `mmap` within mmtk-core to be accompanied with an "annotation" for the purpose of the mmap. On Linux, we will use `PR_SET_VMA_ANON_NAME` to set the attribute after `mmap` so that it can be seen in `/proc/pid/maps`. This will greatly improve the debugging experience. --- Cargo.toml | 7 + docs/userguide/src/migration/prefix.md | 16 ++ src/policy/lockfreeimmortalspace.rs | 24 +- src/policy/marksweepspace/malloc_ms/global.rs | 2 +- .../marksweepspace/malloc_ms/metadata.rs | 21 +- src/policy/sft_map.rs | 8 +- src/policy/space.rs | 45 ++-- src/policy/vmspace.rs | 2 +- src/util/heap/layout/byte_map_mmapper.rs | 44 +++- src/util/heap/layout/fragmented_mapper.rs | 35 ++- src/util/heap/layout/mmapper.rs | 29 +- src/util/memory.rs | 249 ++++++++++++++---- src/util/metadata/side_metadata/global.rs | 63 ++++- src/util/metadata/side_metadata/helpers.rs | 5 +- src/util/metadata/side_metadata/helpers_32.rs | 9 +- .../side_metadata/side_metadata_tests.rs | 17 +- src/util/raw_memory_freelist.rs | 10 +- .../mock_test_handle_mmap_conflict.rs | 16 +- .../mock_tests/mock_test_handle_mmap_oom.rs | 8 +- 19 files changed, 467 insertions(+), 143 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 20486c341c..f5c90ce936 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,6 +179,13 @@ count_live_bytes_in_gc = [] # capture the type names of work packets. bpftrace_workaround = [] +# Disable mmap annotations. +# All invocations of `mmap` in mmtk-core are accompanied by calls of `prctl` with +# `PR_SET_VMA_ANON_NAME` to annotate the mmap ranges with human-readable names. It is enabled by +# default and should work fine even with large heap sizes. However, if this is causing problems, +# users can disable such annotations by enabling this Cargo feature. +no_mmap_annotation = [] + # Do not modify the following line - ci-common.sh matches it # -- Mutally exclusive features -- # Only one feature from each group can be provided. Otherwise build will fail. diff --git a/docs/userguide/src/migration/prefix.md b/docs/userguide/src/migration/prefix.md index 51e8b0fc17..451fd5657c 100644 --- a/docs/userguide/src/migration/prefix.md +++ b/docs/userguide/src/migration/prefix.md @@ -30,6 +30,22 @@ Notes for the mmtk-core developers: +## 0.30.0 + +### mmap-related functions require annotation + +```admonish tldr +Memory-mapping functions in `mmtk::util::memory` now take an additional `MmapAnnotation` argument. +``` + +API changes: + +- module `util::memory` + + The following functions take an additional `MmapAnnotation` argument. 
+ * `dzmmap` + * `dzmmap_noreplace` + * `mmap_noreserve` + ## 0.28.0 ### `handle_user_collection_request` returns `bool` diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index 05cf448001..6c6b842425 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -13,6 +13,7 @@ use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::heap::PageResource; use crate::util::heap::VMRequest; +use crate::util::memory::MmapAnnotation; use crate::util::memory::MmapStrategy; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSanity; @@ -241,15 +242,22 @@ impl LockFreeImmortalSpace { *args.options.transparent_hugepages, crate::util::memory::MmapProtection::ReadWrite, ); - crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap(); - if space + crate::util::memory::dzmmap_noreplace( + start, + aligned_total_bytes, + strategy, + &MmapAnnotation::Space { + name: space.get_name(), + }, + ) + .unwrap(); + space .metadata - .try_map_metadata_space(start, aligned_total_bytes) - .is_err() - { - // TODO(Javad): handle meta space allocation failure - panic!("failed to mmap meta memory"); - } + .try_map_metadata_space(start, aligned_total_bytes, space.get_name()) + .unwrap_or_else(|e| { + // TODO(Javad): handle meta space allocation failure + panic!("failed to mmap meta memory: {e}") + }); space } diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index d6a3124a1a..9424a5069a 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -439,7 +439,7 @@ impl MallocSpace { fn map_metadata_and_update_bound(&self, addr: Address, size: usize) { // Map the metadata space for the range [addr, addr + size) - map_meta_space(&self.metadata, addr, size); + map_meta_space(&self.metadata, addr, size, self.get_name()); // Update the bounds of the max and min chunk addresses seen -- this is used later in the sweep // Lockless compare-and-swap loops perform better than a locking variant diff --git a/src/policy/marksweepspace/malloc_ms/metadata.rs b/src/policy/marksweepspace/malloc_ms/metadata.rs index 7fb5b5738a..0bb208e9cd 100644 --- a/src/policy/marksweepspace/malloc_ms/metadata.rs +++ b/src/policy/marksweepspace/malloc_ms/metadata.rs @@ -76,7 +76,7 @@ fn is_meta_space_mapped_for_address(address: Address) -> bool { } /// Eagerly map the active chunk metadata surrounding `chunk_start` -fn map_active_chunk_metadata(chunk_start: Address) { +fn map_active_chunk_metadata(chunk_start: Address, space_name: &str) { debug_assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK)); // We eagerly map 16Gb worth of space for the chunk mark bytes on 64-bits // We require saturating subtractions in order to not overflow the chunk_start by @@ -99,16 +99,20 @@ fn map_active_chunk_metadata(chunk_start: Address) { chunk_start + (size / 2) ); - assert!( - CHUNK_METADATA.try_map_metadata_space(start, size).is_ok(), - "failed to mmap meta memory" - ); + CHUNK_METADATA + .try_map_metadata_space(start, size, space_name) + .unwrap_or_else(|e| panic!("failed to mmap meta memory: {e}")); } /// We map the active chunk metadata (if not previously mapped), as well as the VO bit metadata /// and active page metadata here. Note that if [addr, addr + size) crosses multiple chunks, we /// will map for each chunk. 
-pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize) { +pub(super) fn map_meta_space( + metadata: &SideMetadataContext, + addr: Address, + size: usize, + space_name: &str, +) { // In order to prevent race conditions, we synchronize on the lock first and then // check if we need to map the active chunk metadata for `chunk_start` let _lock = CHUNK_MAP_LOCK.lock().unwrap(); @@ -118,7 +122,7 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size // Check if the chunk bit metadata is mapped. If it is not mapped, map it. // Note that the chunk bit metadata is global. It may have been mapped because other policy mapped it. if !is_chunk_mapped(start) { - map_active_chunk_metadata(start); + map_active_chunk_metadata(start, space_name); } // If we have set the chunk bit, return. This is needed just in case another thread has done this before @@ -131,7 +135,8 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size // Note that this might fail. For example, we have marked a chunk as active but later we freed all // the objects in it, and unset its chunk bit. However, we do not free its metadata. So for the chunk, // its chunk bit is mapped, but not marked, and all its local metadata is also mapped. - let mmap_metadata_result = metadata.try_map_metadata_space(start, BYTES_IN_CHUNK); + let mmap_metadata_result = + metadata.try_map_metadata_space(start, BYTES_IN_CHUNK, space_name); debug_assert!( mmap_metadata_result.is_ok(), "mmap sidemetadata failed for chunk_start ({})", diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs index 8fccf9cd5f..c9db21830e 100644 --- a/src/policy/sft_map.rs +++ b/src/policy/sft_map.rs @@ -378,9 +378,11 @@ mod dense_chunk_map { global: vec![SFT_DENSE_CHUNK_MAP_INDEX], local: vec![], }; - if context.try_map_metadata_space(start, bytes).is_err() { - panic!("failed to mmap metadata memory"); - } + context + .try_map_metadata_space(start, bytes, "SFTDenseChunkMap") + .unwrap_or_else(|e| { + panic!("failed to mmap metadata memory: {e}"); + }); self.update(space, start, bytes); } diff --git a/src/policy/space.rs b/src/policy/space.rs index 8048009b20..d1dd36cede 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -144,12 +144,19 @@ pub trait Space: 'static + SFT + Sync + Downcast { if let Err(mmap_error) = self .common() .mmapper - .ensure_mapped(res.start, res.pages, self.common().mmap_strategy()) - .and( - self.common() - .metadata - .try_map_metadata_space(res.start, bytes), + .ensure_mapped( + res.start, + res.pages, + self.common().mmap_strategy(), + &memory::MmapAnnotation::Space { + name: self.get_name(), + }, ) + .and(self.common().metadata.try_map_metadata_space( + res.start, + bytes, + self.get_name(), + )) { memory::handle_mmap_error::(mmap_error, tls, res.start, bytes); } @@ -293,15 +300,13 @@ pub trait Space: 'static + SFT + Sync + Downcast { /// Ensure this space is marked as mapped -- used when the space is already /// mapped (e.g. for a vm image which is externally mmapped.) 
fn ensure_mapped(&self) { - if self - .common() + self.common() .metadata - .try_map_metadata_space(self.common().start, self.common().extent) - .is_err() - { - // TODO(Javad): handle meta space allocation failure - panic!("failed to mmap meta memory"); - } + .try_map_metadata_space(self.common().start, self.common().extent, self.get_name()) + .unwrap_or_else(|e| { + // TODO(Javad): handle meta space allocation failure + panic!("failed to mmap meta memory: {e}"); + }); self.common() .mmapper @@ -609,14 +614,12 @@ impl CommonSpace { } // For contiguous space, we know its address range so we reserve metadata memory for its range. - if rtn - .metadata - .try_map_metadata_address_range(rtn.start, rtn.extent) - .is_err() - { - // TODO(Javad): handle meta space allocation failure - panic!("failed to mmap meta memory"); - } + rtn.metadata + .try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name) + .unwrap_or_else(|e| { + // TODO(Javad): handle meta space allocation failure + panic!("failed to mmap meta memory: {e}"); + }); debug!( "Created space {} [{}, {}) for {} bytes", diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index 60199c0fde..770c362cfd 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -229,7 +229,7 @@ impl VMSpace { // Map side metadata self.common .metadata - .try_map_metadata_space(chunk_start, chunk_size) + .try_map_metadata_space(chunk_start, chunk_size, self.get_name()) .unwrap(); // Insert to vm map: it would be good if we can make VM map aware of the region. However, the region may be outside what we can map in our VM map implementation. // self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor); diff --git a/src/util/heap/layout/byte_map_mmapper.rs b/src/util/heap/layout/byte_map_mmapper.rs index ff5dc8c8fe..412e53ddcb 100644 --- a/src/util/heap/layout/byte_map_mmapper.rs +++ b/src/util/heap/layout/byte_map_mmapper.rs @@ -1,5 +1,6 @@ use super::mmapper::MapState; use super::Mmapper; +use crate::util::memory::MmapAnnotation; use crate::util::Address; use crate::util::constants::*; @@ -44,7 +45,13 @@ impl Mmapper for ByteMapMmapper { } } - fn ensure_mapped(&self, start: Address, pages: usize, strategy: MmapStrategy) -> Result<()> { + fn ensure_mapped( + &self, + start: Address, + pages: usize, + strategy: MmapStrategy, + anno: &MmapAnnotation, + ) -> Result<()> { let start_chunk = Self::address_to_mmap_chunks_down(start); let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages)); trace!( @@ -62,7 +69,8 @@ impl Mmapper for ByteMapMmapper { let mmap_start = Self::mmap_chunks_to_address(chunk); let _guard = self.lock.lock().unwrap(); - MapState::transition_to_mapped(&self.mapped[chunk], mmap_start, strategy).unwrap(); + MapState::transition_to_mapped(&self.mapped[chunk], mmap_start, strategy, anno) + .unwrap(); } Ok(()) @@ -73,6 +81,7 @@ impl Mmapper for ByteMapMmapper { start: Address, pages: usize, strategy: MmapStrategy, + anno: &MmapAnnotation, ) -> Result<()> { let start_chunk = Self::address_to_mmap_chunks_down(start); let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages)); @@ -91,7 +100,8 @@ impl Mmapper for ByteMapMmapper { let mmap_start = Self::mmap_chunks_to_address(chunk); let _guard = self.lock.lock().unwrap(); - MapState::transition_to_quarantined(&self.mapped[chunk], mmap_start, strategy).unwrap(); + MapState::transition_to_quarantined(&self.mapped[chunk], mmap_start, strategy, anno) + .unwrap(); } Ok(()) @@ -172,6 +182,7 @@ impl Default for ByteMapMmapper { 
#[cfg(test)] mod tests { use super::ByteMapMmapper; + use crate::mmap_anno_test; use crate::util::heap::layout::Mmapper; use crate::util::Address; @@ -237,7 +248,7 @@ mod tests { || { let mmapper = ByteMapMmapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!()) .unwrap(); for chunk in start_chunk..end_chunk { @@ -266,7 +277,7 @@ mod tests { || { let mmapper = ByteMapMmapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!()) .unwrap(); for chunk in start_chunk..end_chunk { @@ -295,7 +306,7 @@ mod tests { || { let mmapper = ByteMapMmapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!()) .unwrap(); let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS); @@ -329,7 +340,12 @@ mod tests { // map 2 chunks let mmapper = ByteMapMmapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST) + .ensure_mapped( + FIXED_ADDRESS, + test_memory_pages, + MmapStrategy::TEST, + mmap_anno_test!(), + ) .unwrap(); // protect 1 chunk @@ -364,7 +380,12 @@ mod tests { // map 2 chunks let mmapper = ByteMapMmapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST) + .ensure_mapped( + FIXED_ADDRESS, + test_memory_pages, + MmapStrategy::TEST, + mmap_anno_test!(), + ) .unwrap(); // protect 1 chunk @@ -382,7 +403,12 @@ mod tests { // ensure mapped - this will unprotect the previously protected chunk mmapper - .ensure_mapped(FIXED_ADDRESS, protect_memory_pages_2, MmapStrategy::TEST) + .ensure_mapped( + FIXED_ADDRESS, + protect_memory_pages_2, + MmapStrategy::TEST, + mmap_anno_test!(), + ) .unwrap(); assert_eq!( mmapper.mapped[chunk].load(Ordering::Relaxed), diff --git a/src/util/heap/layout/fragmented_mapper.rs b/src/util/heap/layout/fragmented_mapper.rs index 53ce26c441..0ebec07b83 100644 --- a/src/util/heap/layout/fragmented_mapper.rs +++ b/src/util/heap/layout/fragmented_mapper.rs @@ -3,7 +3,7 @@ use super::Mmapper; use crate::util::constants::BYTES_IN_PAGE; use crate::util::conversions; use crate::util::heap::layout::vm_layout::*; -use crate::util::memory::MmapStrategy; +use crate::util::memory::{MmapAnnotation, MmapStrategy}; use crate::util::Address; use atomic::{Atomic, Ordering}; use std::cell::UnsafeCell; @@ -94,6 +94,7 @@ impl Mmapper for FragmentedMapper { mut start: Address, pages: usize, strategy: MmapStrategy, + anno: &MmapAnnotation, ) -> Result<()> { debug_assert!(start.is_aligned_to(BYTES_IN_PAGE)); @@ -140,6 +141,7 @@ impl Mmapper for FragmentedMapper { state_slices.as_slice(), mmap_start, strategy, + anno, )?; } @@ -151,6 +153,7 @@ impl Mmapper for FragmentedMapper { mut start: Address, pages: usize, strategy: MmapStrategy, + anno: &MmapAnnotation, ) -> Result<()> { let end = start + conversions::pages_to_bytes(pages); // Iterate over the slabs covered @@ -176,7 +179,7 @@ impl Mmapper for FragmentedMapper { let mmap_start = Self::chunk_index_to_address(base, chunk); let _guard = self.lock.lock().unwrap(); - MapState::transition_to_mapped(entry, mmap_start, strategy)?; + MapState::transition_to_mapped(entry, mmap_start, strategy, anno)?; } start = high; } @@ -393,6 +396,7 @@ impl Default for FragmentedMapper { #[cfg(test)] mod tests { use super::*; + use crate::mmap_anno_test; use 
crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::heap::layout::vm_layout::MMAP_CHUNK_BYTES; use crate::util::memory; @@ -446,7 +450,7 @@ mod tests { || { let mmapper = FragmentedMapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!()) .unwrap(); let chunks = pages_to_chunks_up(pages); @@ -474,7 +478,7 @@ mod tests { || { let mmapper = FragmentedMapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!()) .unwrap(); let chunks = pages_to_chunks_up(pages); @@ -503,7 +507,7 @@ mod tests { || { let mmapper = FragmentedMapper::new(); mmapper - .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST) + .ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!()) .unwrap(); let chunks = pages_to_chunks_up(pages); @@ -533,7 +537,12 @@ mod tests { let mmapper = FragmentedMapper::new(); let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; mmapper - .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, MmapStrategy::TEST) + .ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + MmapStrategy::TEST, + mmap_anno_test!(), + ) .unwrap(); // protect 1 chunk @@ -564,7 +573,12 @@ mod tests { let mmapper = FragmentedMapper::new(); let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize; mmapper - .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, MmapStrategy::TEST) + .ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + MmapStrategy::TEST, + mmap_anno_test!(), + ) .unwrap(); // protect 1 chunk @@ -581,7 +595,12 @@ mod tests { // ensure mapped - this will unprotect the previously protected chunk mmapper - .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, MmapStrategy::TEST) + .ensure_mapped( + FIXED_ADDRESS, + pages_per_chunk * 2, + MmapStrategy::TEST, + mmap_anno_test!(), + ) .unwrap(); assert_eq!( get_chunk_map_state(&mmapper, FIXED_ADDRESS), diff --git a/src/util/heap/layout/mmapper.rs b/src/util/heap/layout/mmapper.rs index 9039bfe261..364cc0bc0d 100644 --- a/src/util/heap/layout/mmapper.rs +++ b/src/util/heap/layout/mmapper.rs @@ -32,11 +32,15 @@ pub trait Mmapper: Sync { /// Arguments: /// * `start`: Address of the first page to be quarantined /// * `bytes`: Number of bytes to quarantine from the start + /// * `strategy`: The mmap strategy. The `prot` field is ignored because we always use + /// `PROT_NONE`. + /// * `anno`: Human-readable annotation to apply to newly mapped memory ranges. fn quarantine_address_range( &self, start: Address, pages: usize, strategy: MmapStrategy, + anno: &MmapAnnotation, ) -> Result<()>; /// Ensure that a range of pages is mmapped (or equivalent). If the @@ -46,10 +50,18 @@ pub trait Mmapper: Sync { /// Arguments: /// * `start`: The start of the range to be mapped. /// * `pages`: The size of the range to be mapped, in pages + /// * `strategy`: The mmap strategy. + /// * `anno`: Human-readable annotation to apply to newly mapped memory ranges. // NOTE: There is a monotonicity assumption so that only updates require lock // acquisition. // TODO: Fix the above to support unmapping. - fn ensure_mapped(&self, start: Address, pages: usize, strategy: MmapStrategy) -> Result<()>; + fn ensure_mapped( + &self, + start: Address, + pages: usize, + strategy: MmapStrategy, + anno: &MmapAnnotation, + ) -> Result<()>; /// Is the page pointed to by this address mapped? Returns true if /// the page at the given address is mapped. 
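The monotonicity note above is why the mappers can read chunk states without a lock and only synchronize around the actual transition. The following is a toy, standalone sketch of that scheme under simplified assumptions; it is not MMTk's `MapState`, which also has `Protected` and `Quarantined` states and performs real `mmap` calls.

```rust
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::Mutex;

const UNMAPPED: u8 = 0;
const MAPPED: u8 = 1;

struct ToyMmapper {
    states: Vec<AtomicU8>,
    lock: Mutex<()>,
}

impl ToyMmapper {
    fn new(chunks: usize) -> Self {
        Self {
            states: (0..chunks).map(|_| AtomicU8::new(UNMAPPED)).collect(),
            lock: Mutex::new(()),
        }
    }

    fn ensure_mapped(&self, chunk: usize) {
        // Fast path: states only move forward (UNMAPPED -> MAPPED), so a relaxed
        // read that observes MAPPED can safely skip the lock.
        if self.states[chunk].load(Ordering::Relaxed) == MAPPED {
            return;
        }
        // Slow path: take the lock, re-check, and perform the transition once.
        let _guard = self.lock.lock().unwrap();
        if self.states[chunk].load(Ordering::Relaxed) == UNMAPPED {
            // The real mapper would mmap (and annotate) the chunk here.
            self.states[chunk].store(MAPPED, Ordering::Relaxed);
        }
    }
}

fn main() {
    let mapper = ToyMmapper::new(4);
    mapper.ensure_mapped(2);
    mapper.ensure_mapped(2); // idempotent: the second call takes the fast path
    assert_eq!(mapper.states[2].load(Ordering::Relaxed), MAPPED);
}
```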
@@ -88,6 +100,7 @@ impl MapState { state: &Atomic, mmap_start: Address, strategy: MmapStrategy, + anno: &MmapAnnotation, ) -> Result<()> { trace!( "Trying to map {} - {}", @@ -95,9 +108,11 @@ impl MapState { mmap_start + MMAP_CHUNK_BYTES ); let res = match state.load(Ordering::Relaxed) { - MapState::Unmapped => dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES, strategy), + MapState::Unmapped => dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES, strategy, anno), MapState::Protected => munprotect(mmap_start, MMAP_CHUNK_BYTES, strategy.prot), - MapState::Quarantined => unsafe { dzmmap(mmap_start, MMAP_CHUNK_BYTES, strategy) }, + MapState::Quarantined => unsafe { + dzmmap(mmap_start, MMAP_CHUNK_BYTES, strategy, anno) + }, // might have become MapState::Mapped here MapState::Mapped => Ok(()), }; @@ -113,6 +128,7 @@ impl MapState { state: &Atomic, mmap_start: Address, strategy: MmapStrategy, + anno: &MmapAnnotation, ) -> Result<()> { trace!( "Trying to quarantine {} - {}", @@ -120,7 +136,7 @@ impl MapState { mmap_start + MMAP_CHUNK_BYTES ); let res = match state.load(Ordering::Relaxed) { - MapState::Unmapped => mmap_noreserve(mmap_start, MMAP_CHUNK_BYTES, strategy), + MapState::Unmapped => mmap_noreserve(mmap_start, MMAP_CHUNK_BYTES, strategy, anno), MapState::Quarantined => Ok(()), MapState::Mapped => { // If a chunk is mapped by us and we try to quarantine it, we simply don't do anything. @@ -153,10 +169,13 @@ impl MapState { /// /// * `state_slices`: A slice of slices. Each inner slice is a part of a `Slab`. /// * `mmap_start`: The start of the region to transition. + /// * `strategy`: The mmap strategy. + /// * `anno`: Human-readable annotation to apply to newly mapped memory ranges. pub(super) fn bulk_transition_to_quarantined( state_slices: &[&[Atomic]], mmap_start: Address, strategy: MmapStrategy, + anno: &MmapAnnotation, ) -> Result<()> { trace!( "Trying to bulk-quarantine {} - {}", @@ -179,7 +198,7 @@ impl MapState { match group.key { MapState::Unmapped => { trace!("Trying to quarantine {} - {}", start_addr, end_addr); - mmap_noreserve(start_addr, end_addr - start_addr, strategy)?; + mmap_noreserve(start_addr, end_addr - start_addr, strategy, anno)?; for state in group { state.store(MapState::Quarantined, Ordering::Relaxed); diff --git a/src/util/memory.rs b/src/util/memory.rs index 771068aaff..0e689be6b9 100644 --- a/src/util/memory.rs +++ b/src/util/memory.rs @@ -84,6 +84,76 @@ pub enum HugePageSupport { TransparentHugePages, } +/// Annotation for an mmap entry. +/// +/// Invocations of `mmap_fixed` and other functions that may transitively call `mmap_fixed` +/// require an annotation that indicates the purpose of the memory mapping. +/// +/// This is for debugging. On Linux, mmtk-core will use `prctl` with `PR_SET_VMA` to set the +/// human-readable name for the given mmap region. The annotation is ignored on other platforms. +/// +/// Note that when using `Map32` (even when running on 64-bit architectures), the discontiguous +/// memory range is shared between different spaces. Spaces may use `mmap` to map new chunks, but +/// the same chunk may later be reused by other spaces. The annotation only applies when `mmap` is +/// called for a chunk for the first time, which reflects which space first attempted the mmap, not +/// which space is currently using the chunk. Use `crate::policy::space::print_vm_map` to print a +/// more accurate mapping between address ranges and spaces. +/// +/// On 32-bit architecture, side metadata are allocated in a chunked fasion. 
One single `mmap` +/// region will contain many different metadata. In that case, we simply annotate the whole region +/// with a `MmapAnnotation::SideMeta` where `meta` is `"all"`. +pub enum MmapAnnotation<'a> { + /// The mmap is for a space. + Space { + /// The name of the space. + name: &'a str, + }, + /// The mmap is for a side metadata. + SideMeta { + /// The name of the space. + space: &'a str, + /// The name of the side metadata. + meta: &'a str, + }, + /// The mmap is for a test case. Usually constructed using the [`mmap_anno_test!`] macro. + Test { + /// The source file. + file: &'a str, + /// The line number. + line: u32, + }, + /// For all other use cases. + Misc { + /// A human-readable descriptive name. + name: &'a str, + }, +} + +/// Construct an `MmapAnnotation::Test` with the current file name and line number. +#[macro_export] +macro_rules! mmap_anno_test { + () => { + &$crate::util::memory::MmapAnnotation::Test { + file: file!(), + line: line!(), + } + }; +} + +// Export this to external crates +pub use mmap_anno_test; + +impl<'a> std::fmt::Display for MmapAnnotation<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MmapAnnotation::Space { name } => write!(f, "mmtk:space:{name}"), + MmapAnnotation::SideMeta { space, meta } => write!(f, "mmtk:sidemeta:{space}:{meta}"), + MmapAnnotation::Test { file, line } => write!(f, "mmtk:test:{file}:{line}"), + MmapAnnotation::Misc { name } => write!(f, "mmtk:misc:{name}"), + } + } +} + /// Check the result from an mmap function in this module. /// Return true if the mmap has failed due to an existing conflicting mapping. pub(crate) fn result_is_mapped(result: Result<()>) -> bool { @@ -115,9 +185,14 @@ pub fn set(start: Address, val: u8, len: usize) { /// the memory has been reserved by mmtk (e.g. after the use of mmap_noreserve()). Otherwise using this function /// may corrupt others' data. #[allow(clippy::let_and_return)] // Zeroing is not neceesary for some OS/s -pub unsafe fn dzmmap(start: Address, size: usize, strategy: MmapStrategy) -> Result<()> { +pub unsafe fn dzmmap( + start: Address, + size: usize, + strategy: MmapStrategy, + anno: &MmapAnnotation, +) -> Result<()> { let flags = libc::MAP_ANON | libc::MAP_PRIVATE | libc::MAP_FIXED; - let ret = mmap_fixed(start, size, flags, strategy); + let ret = mmap_fixed(start, size, flags, strategy, anno); // We do not need to explicitly zero for Linux (memory is guaranteed to be zeroed) #[cfg(not(target_os = "linux"))] if ret.is_ok() { @@ -129,9 +204,14 @@ pub unsafe fn dzmmap(start: Address, size: usize, strategy: MmapStrategy) -> Res /// This function mmaps the memory and guarantees to zero all mapped memory. /// This function will not overwrite existing memory mapping, and it will result Err if there is an existing mapping. 
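To make the resulting names concrete, the following is a stripped-down mirror of the `Display` impl above, keeping only two variants and using made-up space and metadata names. The format strings match the impl in this patch; everything else is illustrative.

```rust
enum Anno<'a> {
    Space { name: &'a str },
    SideMeta { space: &'a str, meta: &'a str },
}

impl std::fmt::Display for Anno<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Anno::Space { name } => write!(f, "mmtk:space:{name}"),
            Anno::SideMeta { space, meta } => write!(f, "mmtk:sidemeta:{space}:{meta}"),
        }
    }
}

fn main() {
    // The space and metadata names below are placeholders, not real MMTk names.
    assert_eq!(Anno::Space { name: "somespace" }.to_string(), "mmtk:space:somespace");
    assert_eq!(
        Anno::SideMeta { space: "somespace", meta: "somemeta" }.to_string(),
        "mmtk:sidemeta:somespace:somemeta"
    );
}
```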
#[allow(clippy::let_and_return)] // Zeroing is not neceesary for some OS/s -pub fn dzmmap_noreplace(start: Address, size: usize, strategy: MmapStrategy) -> Result<()> { +pub fn dzmmap_noreplace( + start: Address, + size: usize, + strategy: MmapStrategy, + anno: &MmapAnnotation, +) -> Result<()> { let flags = MMAP_FLAGS; - let ret = mmap_fixed(start, size, flags, strategy); + let ret = mmap_fixed(start, size, flags, strategy, anno); // We do not need to explicitly zero for Linux (memory is guaranteed to be zeroed) #[cfg(not(target_os = "linux"))] if ret.is_ok() { @@ -144,10 +224,15 @@ pub fn dzmmap_noreplace(start: Address, size: usize, strategy: MmapStrategy) -> /// This function does not reserve swap space for this mapping, which means there is no guarantee that writes to the /// mapping can always be successful. In case of out of physical memory, one may get a segfault for writing to the mapping. /// We can use this to reserve the address range, and then later overwrites the mapping with dzmmap(). -pub fn mmap_noreserve(start: Address, size: usize, mut strategy: MmapStrategy) -> Result<()> { +pub fn mmap_noreserve( + start: Address, + size: usize, + mut strategy: MmapStrategy, + anno: &MmapAnnotation, +) -> Result<()> { strategy.prot = MmapProtection::NoAccess; let flags = MMAP_FLAGS | libc::MAP_NORESERVE; - mmap_fixed(start, size, flags, strategy) + mmap_fixed(start, size, flags, strategy, anno) } fn mmap_fixed( @@ -155,6 +240,7 @@ fn mmap_fixed( size: usize, flags: libc::c_int, strategy: MmapStrategy, + _anno: &MmapAnnotation, ) -> Result<()> { let ptr = start.to_mut_ptr(); let prot = strategy.prot.into_native_flags(); @@ -162,6 +248,37 @@ fn mmap_fixed( &|| unsafe { libc::mmap(start.to_mut_ptr(), size, prot, flags, -1, 0) }, ptr, )?; + + #[cfg(all( + any(target_os = "linux", target_os = "android"), + not(feature = "no_mmap_annotation") + ))] + { + // `PR_SET_VMA` is new in Linux 5.17. We compile against a version of the `libc` crate that + // has the `PR_SET_VMA_ANON_NAME` constant. When runnning on an older kernel, it will not + // recognize this attribute and will return `EINVAL`. However, `prctl` may return `EINVAL` + // for other reasons, too. That includes `start` being an invalid address, and the + // formatted `anno_cstr` being longer than 80 bytes including the trailing `'\0'`. But + // since this prctl is used for debugging, we log the error instead of panicking. + let anno_str = _anno.to_string(); + let anno_cstr = std::ffi::CString::new(anno_str).unwrap(); + let result = wrap_libc_call( + &|| unsafe { + libc::prctl( + libc::PR_SET_VMA, + libc::PR_SET_VMA_ANON_NAME, + start.to_ptr::(), + size, + anno_cstr.as_ptr(), + ) + }, + 0, + ); + if let Err(e) = result { + debug!("Error while calling prctl: {e}"); + } + } + match strategy.huge_page { HugePageSupport::No => Ok(()), HugePageSupport::TransparentHugePages => { @@ -229,39 +346,36 @@ pub fn handle_mmap_error( } /// Checks if the memory has already been mapped. If not, we panic. +/// /// Note that the checking has a side effect that it will map the memory if it was unmapped. So we panic if it was unmapped. /// Be very careful about using this function. 
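The `prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...)` call added to `mmap_fixed` above is what makes the annotation visible in `/proc/<pid>/maps`. Below is a standalone sketch of the same mechanism, assuming Linux and the `libc` crate; the mapping and the name are arbitrary, error handling is simplified, and on kernels before 5.17 the `prctl` simply fails with `EINVAL`.

```rust
fn main() {
    let size = 4096usize;
    // Create an anonymous private mapping to name.
    let addr = unsafe {
        libc::mmap(
            std::ptr::null_mut(),
            size,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
            -1,
            0,
        )
    };
    assert_ne!(addr, libc::MAP_FAILED, "mmap failed");

    // Attach a human-readable name to the range, as the patch does after mmap.
    let name = std::ffi::CString::new("mmtk:misc:example").unwrap();
    let ret = unsafe {
        libc::prctl(
            libc::PR_SET_VMA,
            libc::PR_SET_VMA_ANON_NAME,
            addr,
            size,
            name.as_ptr(),
        )
    };
    if ret != 0 {
        // Non-fatal: older kernels do not support PR_SET_VMA_ANON_NAME.
        eprintln!("prctl failed: {}", std::io::Error::last_os_error());
    }
}
```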
-#[cfg(target_os = "linux")] -pub(crate) fn panic_if_unmapped(start: Address, size: usize) { - let flags = MMAP_FLAGS; - match mmap_fixed( - start, - size, - flags, - MmapStrategy { - huge_page: HugePageSupport::No, - prot: MmapProtection::ReadWrite, - }, - ) { - Ok(_) => panic!("{} of size {} is not mapped", start, size), - Err(e) => { - assert!( - e.kind() == std::io::ErrorKind::AlreadyExists, - "Failed to check mapped: {:?}", - e - ); - } - } -} - -/// Checks if the memory has already been mapped. If not, we panic. +/// /// This function is currently left empty for non-linux, and should be implemented in the future. /// As the function is only used for assertions, MMTk will still run even if we never panic. -#[cfg(not(target_os = "linux"))] -pub(crate) fn panic_if_unmapped(_start: Address, _size: usize) { - // This is only used for assertions, so MMTk will still run even if we never panic. - // TODO: We need a proper implementation for this. As we do not have MAP_FIXED_NOREPLACE, we cannot use the same implementation as Linux. - // Possibly we can use posix_mem_offset for both OS/s. +pub(crate) fn panic_if_unmapped(_start: Address, _size: usize, _anno: &MmapAnnotation) { + #[cfg(target_os = "linux")] + { + let flags = MMAP_FLAGS; + match mmap_fixed( + _start, + _size, + flags, + MmapStrategy { + huge_page: HugePageSupport::No, + prot: MmapProtection::ReadWrite, + }, + _anno, + ) { + Ok(_) => panic!("{} of size {} is not mapped", _start, _size), + Err(e) => { + assert!( + e.kind() == std::io::ErrorKind::AlreadyExists, + "Failed to check mapped: {:?}", + e + ); + } + } + } } /// Unprotect the given memory (in page granularity) to allow access (PROT_READ/WRITE/EXEC). @@ -371,10 +485,14 @@ mod tests { serial_test(|| { with_cleanup( || { - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; + let res = unsafe { + dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST, mmap_anno_test!()) + }; assert!(res.is_ok()); // We can overwrite with dzmmap - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; + let res = unsafe { + dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST, mmap_anno_test!()) + }; assert!(res.is_ok()); }, || { @@ -389,7 +507,12 @@ mod tests { serial_test(|| { with_cleanup( || { - let res = dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST); + let res = dzmmap_noreplace( + START, + BYTES_IN_PAGE, + MmapStrategy::TEST, + mmap_anno_test!(), + ); assert!(res.is_ok()); let res = munmap(START, BYTES_IN_PAGE); assert!(res.is_ok()); @@ -408,10 +531,17 @@ mod tests { with_cleanup( || { // Make sure we mmapped the memory - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; + let res = unsafe { + dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST, mmap_anno_test!()) + }; assert!(res.is_ok()); // Use dzmmap_noreplace will fail - let res = dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST); + let res = dzmmap_noreplace( + START, + BYTES_IN_PAGE, + MmapStrategy::TEST, + mmap_anno_test!(), + ); assert!(res.is_err()); }, || { @@ -426,10 +556,13 @@ mod tests { serial_test(|| { with_cleanup( || { - let res = mmap_noreserve(START, BYTES_IN_PAGE, MmapStrategy::TEST); + let res = + mmap_noreserve(START, BYTES_IN_PAGE, MmapStrategy::TEST, mmap_anno_test!()); assert!(res.is_ok()); // Try reserve it - let res = unsafe { dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST) }; + let res = unsafe { + dzmmap(START, BYTES_IN_PAGE, MmapStrategy::TEST, mmap_anno_test!()) + }; assert!(res.is_ok()); }, || { @@ -447,7 +580,7 @@ mod tests { with_cleanup( 
|| { // We expect this call to panic - panic_if_unmapped(START, BYTES_IN_PAGE); + panic_if_unmapped(START, BYTES_IN_PAGE, mmap_anno_test!()); }, || { assert!(munmap(START, BYTES_IN_PAGE).is_ok()); @@ -461,8 +594,14 @@ mod tests { serial_test(|| { with_cleanup( || { - assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST).is_ok()); - panic_if_unmapped(START, BYTES_IN_PAGE); + assert!(dzmmap_noreplace( + START, + BYTES_IN_PAGE, + MmapStrategy::TEST, + mmap_anno_test!() + ) + .is_ok()); + panic_if_unmapped(START, BYTES_IN_PAGE, mmap_anno_test!()); }, || { assert!(munmap(START, BYTES_IN_PAGE).is_ok()); @@ -479,10 +618,16 @@ mod tests { with_cleanup( || { // map 1 page from START - assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST).is_ok()); + assert!(dzmmap_noreplace( + START, + BYTES_IN_PAGE, + MmapStrategy::TEST, + mmap_anno_test!(), + ) + .is_ok()); // check if the next page is mapped - which should panic - panic_if_unmapped(START + BYTES_IN_PAGE, BYTES_IN_PAGE); + panic_if_unmapped(START + BYTES_IN_PAGE, BYTES_IN_PAGE, mmap_anno_test!()); }, || { assert!(munmap(START, BYTES_IN_PAGE * 2).is_ok()); @@ -501,10 +646,16 @@ mod tests { with_cleanup( || { // map 1 page from START - assert!(dzmmap_noreplace(START, BYTES_IN_PAGE, MmapStrategy::TEST).is_ok()); + assert!(dzmmap_noreplace( + START, + BYTES_IN_PAGE, + MmapStrategy::TEST, + mmap_anno_test!() + ) + .is_ok()); // check if the 2 pages from START are mapped. The second page is unmapped, so it should panic. - panic_if_unmapped(START, BYTES_IN_PAGE * 2); + panic_if_unmapped(START, BYTES_IN_PAGE * 2, mmap_anno_test!()); }, || { assert!(munmap(START, BYTES_IN_PAGE * 2).is_ok()); diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 2a64b33848..b36a1332ad 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -2,7 +2,7 @@ use super::*; use crate::util::constants::{BYTES_IN_PAGE, BYTES_IN_WORD, LOG_BITS_IN_BYTE}; use crate::util::conversions::raw_align_up; use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; -use crate::util::memory; +use crate::util::memory::{self, MmapAnnotation}; use crate::util::metadata::metadata_val_traits::*; #[cfg(feature = "vo_bit")] use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; @@ -122,7 +122,13 @@ impl SideMetadataSpec { meta_start ); - memory::panic_if_unmapped(meta_start, BYTES_IN_PAGE); + memory::panic_if_unmapped( + meta_start, + BYTES_IN_PAGE, + &MmapAnnotation::Misc { + name: "assert_metadata_mapped", + }, + ); } /// Used only for debugging. @@ -1371,7 +1377,12 @@ impl SideMetadataContext { /// Tries to map the required metadata space and returns `true` is successful. /// This can be called at page granularity. - pub fn try_map_metadata_space(&self, start: Address, size: usize) -> Result<()> { + pub fn try_map_metadata_space( + &self, + start: Address, + size: usize, + space_name: &str, + ) -> Result<()> { debug!( "try_map_metadata_space({}, 0x{:x}, {}, {})", start, @@ -1382,14 +1393,19 @@ impl SideMetadataContext { // Page aligned debug_assert!(start.is_aligned_to(BYTES_IN_PAGE)); debug_assert!(size % BYTES_IN_PAGE == 0); - self.map_metadata_internal(start, size, false) + self.map_metadata_internal(start, size, false, space_name) } /// Tries to map the required metadata address range, without reserving swap-space/physical memory for it. /// This will make sure the address range is exclusive to the caller. This should be called at chunk granularity. 
/// /// NOTE: Accessing addresses in this range will produce a segmentation fault if swap-space is not mapped using the `try_map_metadata_space` function. - pub fn try_map_metadata_address_range(&self, start: Address, size: usize) -> Result<()> { + pub fn try_map_metadata_address_range( + &self, + start: Address, + size: usize, + name: &str, + ) -> Result<()> { debug!( "try_map_metadata_address_range({}, 0x{:x}, {}, {})", start, @@ -1400,7 +1416,7 @@ impl SideMetadataContext { // Chunk aligned debug_assert!(start.is_aligned_to(BYTES_IN_CHUNK)); debug_assert!(size % BYTES_IN_CHUNK == 0); - self.map_metadata_internal(start, size, true) + self.map_metadata_internal(start, size, true, name) } /// The internal function to mmap metadata @@ -1409,9 +1425,20 @@ impl SideMetadataContext { /// * `start` - The starting address of the source data. /// * `size` - The size of the source data (in bytes). /// * `no_reserve` - whether to invoke mmap with a noreserve flag (we use this flag to quarantine address range) - fn map_metadata_internal(&self, start: Address, size: usize, no_reserve: bool) -> Result<()> { + /// * `space_name`: The name of the space, used for annotating the mmap. + fn map_metadata_internal( + &self, + start: Address, + size: usize, + no_reserve: bool, + space_name: &str, + ) -> Result<()> { for spec in self.global.iter() { - match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve) { + let anno = MmapAnnotation::SideMeta { + space: space_name, + meta: spec.name, + }; + match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno) { Ok(_) => {} Err(e) => return Result::Err(e), } @@ -1434,7 +1461,11 @@ impl SideMetadataContext { // address space size as the current not-chunked approach. #[cfg(target_pointer_width = "64")] { - match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve) { + let anno = MmapAnnotation::SideMeta { + space: space_name, + meta: spec.name, + }; + match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve, &anno) { Ok(_) => {} Err(e) => return Result::Err(e), } @@ -1454,7 +1485,13 @@ impl SideMetadataContext { lsize, max ); - match try_map_per_chunk_metadata_space(start, size, lsize, no_reserve) { + // We are creating a mmap for all side metadata instead of one specific metadata. We + // just annotate it as "all" here. + let anno = MmapAnnotation::SideMeta { + space: space_name, + meta: "all", + }; + match try_map_per_chunk_metadata_space(start, size, lsize, no_reserve, &anno) { Ok(_) => {} Err(e) => return Result::Err(e), } @@ -1556,6 +1593,7 @@ impl MetadataByteArrayRef { #[cfg(test)] mod tests { use super::*; + use crate::mmap_anno_test; use crate::util::metadata::side_metadata::SideMetadataContext; // offset is not used in these tests. @@ -1636,12 +1674,13 @@ mod tests { let data_addr = vm_layout::vm_layout().heap_start; // Make sure the address is mapped. 
crate::MMAPPER - .ensure_mapped(data_addr, 1, MmapStrategy::TEST) + .ensure_mapped(data_addr, 1, MmapStrategy::TEST, mmap_anno_test!()) .unwrap(); let meta_addr = address_to_meta_address(&spec, data_addr); with_cleanup( || { - let mmap_result = context.try_map_metadata_space(data_addr, BYTES_IN_PAGE); + let mmap_result = + context.try_map_metadata_space(data_addr, BYTES_IN_PAGE, "test_space"); assert!(mmap_result.is_ok()); f(&spec, data_addr, meta_addr); diff --git a/src/util/metadata/side_metadata/helpers.rs b/src/util/metadata/side_metadata/helpers.rs index 0f0a442d9a..c8dc979764 100644 --- a/src/util/metadata/side_metadata/helpers.rs +++ b/src/util/metadata/side_metadata/helpers.rs @@ -3,7 +3,7 @@ use super::SideMetadataSpec; use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::constants::{BITS_IN_WORD, BYTES_IN_PAGE, LOG_BITS_IN_BYTE}; use crate::util::heap::layout::vm_layout::VMLayout; -use crate::util::memory::MmapStrategy; +use crate::util::memory::{MmapAnnotation, MmapStrategy}; #[cfg(target_pointer_width = "32")] use crate::util::metadata::side_metadata::address_to_chunked_meta_address; use crate::util::Address; @@ -126,6 +126,7 @@ pub(super) fn try_mmap_contiguous_metadata_space( size: usize, spec: &SideMetadataSpec, no_reserve: bool, + anno: &MmapAnnotation, ) -> Result { debug_assert!(start.is_aligned_to(BYTES_IN_PAGE)); debug_assert!(size % BYTES_IN_PAGE == 0); @@ -142,12 +143,14 @@ pub(super) fn try_mmap_contiguous_metadata_space( mmap_start, mmap_size >> LOG_BYTES_IN_PAGE, MmapStrategy::SIDE_METADATA, + anno, ) } else { MMAPPER.quarantine_address_range( mmap_start, mmap_size >> LOG_BYTES_IN_PAGE, MmapStrategy::SIDE_METADATA, + anno, ) } .map(|_| mmap_size) diff --git a/src/util/metadata/side_metadata/helpers_32.rs b/src/util/metadata/side_metadata/helpers_32.rs index ef413c0cc6..1372d5916c 100644 --- a/src/util/metadata/side_metadata/helpers_32.rs +++ b/src/util/metadata/side_metadata/helpers_32.rs @@ -2,7 +2,8 @@ use super::SideMetadataSpec; use crate::util::{ constants::{self, LOG_BITS_IN_BYTE}, heap::layout::vm_layout::{BYTES_IN_CHUNK, CHUNK_MASK, LOG_BYTES_IN_CHUNK}, - memory, Address, + memory::{self, MmapAnnotation}, + Address, }; use std::io::Result; @@ -111,6 +112,7 @@ pub(super) fn try_map_per_chunk_metadata_space( size: usize, local_per_chunk: usize, no_reserve: bool, + anno: &MmapAnnotation, ) -> Result { let mut aligned_start = start.align_down(BYTES_IN_CHUNK); let aligned_end = (start + size).align_up(BYTES_IN_CHUNK); @@ -121,7 +123,7 @@ pub(super) fn try_map_per_chunk_metadata_space( let mut total_mapped = 0; while aligned_start < aligned_end { - let res = try_mmap_metadata_chunk(aligned_start, local_per_chunk, no_reserve); + let res = try_mmap_metadata_chunk(aligned_start, local_per_chunk, no_reserve, anno); if res.is_err() { if munmap_first_chunk.is_some() { let mut munmap_start = if munmap_first_chunk.unwrap() { @@ -174,6 +176,7 @@ pub(super) fn try_mmap_metadata_chunk( start: Address, local_per_chunk: usize, no_reserve: bool, + anno: &MmapAnnotation, ) -> Result<()> { debug_assert!(start.is_aligned_to(BYTES_IN_CHUNK)); @@ -185,12 +188,14 @@ pub(super) fn try_mmap_metadata_chunk( policy_meta_start, pages, memory::MmapStrategy::SIDE_METADATA, + anno, ) } else { MMAPPER.quarantine_address_range( policy_meta_start, pages, memory::MmapStrategy::SIDE_METADATA, + anno, ) } } diff --git a/src/util/metadata/side_metadata/side_metadata_tests.rs b/src/util/metadata/side_metadata/side_metadata_tests.rs index ab1abe01d3..890ab1fdd3 100644 --- 
a/src/util/metadata/side_metadata/side_metadata_tests.rs +++ b/src/util/metadata/side_metadata/side_metadata_tests.rs @@ -230,7 +230,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space(heap_start, constants::BYTES_IN_PAGE,) + .try_map_metadata_space(heap_start, constants::BYTES_IN_PAGE, "test_space") .is_ok()); gspec.assert_metadata_mapped(heap_start); @@ -259,6 +259,7 @@ mod tests { .try_map_metadata_space( heap_start + vm_layout::BYTES_IN_CHUNK, vm_layout::BYTES_IN_CHUNK, + "test_space", ) .is_ok()); @@ -313,7 +314,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE,) + .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE, "test_space",) .is_ok()); let zero = @@ -380,7 +381,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE,) + .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE, "test_space",) .is_ok()); let zero = @@ -437,7 +438,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE,) + .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE, "test_space",) .is_ok()); let zero = @@ -519,7 +520,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE,) + .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE, "test_space",) .is_ok()); let zero = @@ -587,7 +588,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE,) + .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE, "test_space",) .is_ok()); // First 9 regions @@ -644,7 +645,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE,) + .try_map_metadata_space(data_addr, constants::BYTES_IN_PAGE, "test_space",) .is_ok()); // First 9 regions @@ -753,7 +754,7 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); metadata - .try_map_metadata_space(data_addr, total_size) + .try_map_metadata_space(data_addr, total_size, "test_space") .unwrap(); metadata_1_spec.bzero_metadata(data_addr, total_size); diff --git a/src/util/raw_memory_freelist.rs b/src/util/raw_memory_freelist.rs index 0b496ab47e..23f6844ed7 100644 --- a/src/util/raw_memory_freelist.rs +++ b/src/util/raw_memory_freelist.rs @@ -3,6 +3,7 @@ use super::memory::MmapStrategy; use crate::util::address::Address; use crate::util::constants::*; use crate::util::conversions; +use crate::util::memory::MmapAnnotation; /** log2 of the number of bits used by a free list entry (two entries per unit) */ const LOG_ENTRY_BITS: usize = LOG_BITS_IN_INT as _; @@ -198,7 +199,14 @@ impl RawMemoryFreeList { } fn mmap(&self, start: Address, bytes: usize) { - let res = super::memory::dzmmap_noreplace(start, bytes, self.strategy); + let res = super::memory::dzmmap_noreplace( + start, + bytes, + self.strategy, + &MmapAnnotation::Misc { + name: "RawMemoryFreeList", + }, + ); assert!(res.is_ok(), "Can't get more space with mmap()"); } pub fn get_limit(&self) -> Address { diff --git a/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs 
b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs index b4cc7be194..1dc6a5759e 100644 --- a/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs +++ b/src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs @@ -11,13 +11,21 @@ pub fn test_handle_mmap_conflict() { || { let start = unsafe { Address::from_usize(0x100_0000) }; let one_megabyte = 1000000; - let mmap1_res = - memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::TEST); + let mmap1_res = memory::dzmmap_noreplace( + start, + one_megabyte, + memory::MmapStrategy::TEST, + memory::mmap_anno_test!(), + ); assert!(mmap1_res.is_ok()); let panic_res = std::panic::catch_unwind(|| { - let mmap2_res = - memory::dzmmap_noreplace(start, one_megabyte, memory::MmapStrategy::TEST); + let mmap2_res = memory::dzmmap_noreplace( + start, + one_megabyte, + memory::MmapStrategy::TEST, + memory::mmap_anno_test!(), + ); assert!(mmap2_res.is_err()); memory::handle_mmap_error::( mmap2_res.err().unwrap(), diff --git a/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs b/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs index 3e23db0fcb..b810c47a43 100644 --- a/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs +++ b/src/vm/tests/mock_tests/mock_test_handle_mmap_oom.rs @@ -18,8 +18,12 @@ pub fn test_handle_mmap_oom() { let start = unsafe { Address::from_usize(0x100_0000) }; // mmap 1 terabyte memory - we expect this will fail due to out of memory. // If that's not the case, increase the size we mmap. - let mmap_res = - memory::dzmmap_noreplace(start, LARGE_SIZE, memory::MmapStrategy::TEST); + let mmap_res = memory::dzmmap_noreplace( + start, + LARGE_SIZE, + memory::MmapStrategy::TEST, + memory::mmap_anno_test!(), + ); memory::handle_mmap_error::( mmap_res.err().unwrap(),