Annotate mmap ranges using PR_SET_VMA #1236

Merged: 24 commits, Nov 29, 2024
Changes from 13 commits
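
For context, this PR labels MMTk's anonymous mappings via Linux's `PR_SET_VMA_ANON_NAME`, so each range shows up with a readable name in `/proc/<pid>/maps`. Below is a minimal stand-alone sketch of the underlying system call, not MMTk's actual helper: the mapping size and the name `mmtk_example_space` are made up, and it assumes the libc crate's `PR_SET_VMA` constants (present in recent libc releases on Linux).

```rust
use std::ffi::CString;

fn main() {
    unsafe {
        // Create an anonymous mapping to annotate.
        let len = 4 * 4096;
        let addr = libc::mmap(
            std::ptr::null_mut(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(addr, libc::MAP_FAILED);

        // Name the range. Requires Linux 5.17+ built with CONFIG_ANON_VMA_NAME;
        // the kernel copies the string, so it only needs to live for this call.
        let name = CString::new("mmtk_example_space").unwrap();
        let ret = libc::prctl(
            libc::PR_SET_VMA,
            libc::PR_SET_VMA_ANON_NAME as libc::c_ulong,
            addr as libc::c_ulong,
            len as libc::c_ulong,
            name.as_ptr(),
        );
        if ret != 0 {
            // Older kernels (or ones without CONFIG_ANON_VMA_NAME) reject the call;
            // treating that as non-fatal keeps the annotation best-effort.
            eprintln!(
                "PR_SET_VMA_ANON_NAME not supported: {}",
                std::io::Error::last_os_error()
            );
        }
        // On success the range appears in /proc/self/maps as "[anon:mmtk_example_space]".
    }
}
```
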
5 changes: 4 additions & 1 deletion Cargo.toml
@@ -33,7 +33,10 @@ is-terminal = "0.4.7"
itertools = "0.12.0"
jemalloc-sys = { version = "0.5.3", features = ["disable_initial_exec_tls"], optional = true }
lazy_static = "1.1"
libc = "0.2"
# libc 0.2.165 introduced an API-breaking change that breaks the `sysinfo` crate on MacOS.
# We temporarily pin the version of libc until the issue is resolved.
# See: https://github.com/GuillaumeGomez/sysinfo/issues/1392
libc = "=0.2.164"
log = { version = "0.4", features = ["max_level_trace", "release_max_level_off"] }
memoffset = "0.9"
mimalloc-sys = { version = "0.1.6", optional = true }
3 changes: 3 additions & 0 deletions src/mmtk.rs
@@ -135,6 +135,9 @@ unsafe impl<VM: VMBinding> Send for MMTK<VM> {}
impl<VM: VMBinding> MMTK<VM> {
/// Create an MMTK instance. This is not public. Bindings should use [`MMTKBuilder::build`].
pub(crate) fn new(options: Arc<Options>) -> Self {
// Set the static variable for mmap annotation.
crate::util::memory::MMAP_ANNOTATION.store(*options.mmap_annotation, Ordering::SeqCst);

// Initialize SFT first in case we need to use this in the constructor.
// The first call will initialize SFT map. Other calls will be blocked until SFT map is initialized.
crate::policy::sft_map::SFTRefStorage::pre_use_check();
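
The hunk above wires the `mmap_annotation` option into a process-wide flag that the low-level mmap helpers consult. A sketch of that pattern, assuming `MMAP_ANNOTATION` in `util::memory` is an `AtomicBool` (its declaration is not shown in this diff):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// Assumed shape of the static referenced above; defaults to annotating.
pub static MMAP_ANNOTATION: AtomicBool = AtomicBool::new(true);

/// Called once from `MMTK::new`, before any space performs an mmap,
/// so later reads observe the user's choice.
pub fn set_mmap_annotation(enabled: bool) {
    MMAP_ANNOTATION.store(enabled, Ordering::SeqCst);
}

/// Queried by the mmap wrappers right before issuing PR_SET_VMA.
pub fn mmap_annotation_enabled() -> bool {
    MMAP_ANNOTATION.load(Ordering::SeqCst)
}

fn main() {
    set_mmap_annotation(false);
    assert!(!mmap_annotation_enabled());
}
```
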
24 changes: 16 additions & 8 deletions src/policy/lockfreeimmortalspace.rs
@@ -13,6 +13,7 @@ use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::PageResource;
use crate::util::heap::VMRequest;
use crate::util::memory::MmapAnnotation;
use crate::util::memory::MmapStrategy;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSanity;
@@ -241,15 +242,22 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
*args.options.transparent_hugepages,
crate::util::memory::MmapProtection::ReadWrite,
);
crate::util::memory::dzmmap_noreplace(start, aligned_total_bytes, strategy).unwrap();
if space
crate::util::memory::dzmmap_noreplace(
start,
aligned_total_bytes,
strategy,
&MmapAnnotation::Space {
name: space.get_name(),
},
)
.unwrap();
space
.metadata
.try_map_metadata_space(start, aligned_total_bytes)
.is_err()
{
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory");
}
.try_map_metadata_space(start, aligned_total_bytes, space.get_name())
.unwrap_or_else(|e| {
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory: {e}")
});

space
}
2 changes: 1 addition & 1 deletion src/policy/marksweepspace/malloc_ms/global.rs
@@ -439,7 +439,7 @@ impl<VM: VMBinding> MallocSpace<VM> {

fn map_metadata_and_update_bound(&self, addr: Address, size: usize) {
// Map the metadata space for the range [addr, addr + size)
map_meta_space(&self.metadata, addr, size);
map_meta_space(&self.metadata, addr, size, self.get_name());

// Update the bounds of the max and min chunk addresses seen -- this is used later in the sweep
// Lockless compare-and-swap loops perform better than a locking variant
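
The comment above ("lockless compare-and-swap loops perform better than a locking variant") refers to a pattern like the following sketch, which is illustrative rather than MMTk's exact code: it raises a shared upper bound without taking a mutex.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Raise `max_addr` to at least `candidate` without taking a lock.
fn update_max(max_addr: &AtomicUsize, candidate: usize) {
    let mut current = max_addr.load(Ordering::Relaxed);
    while candidate > current {
        match max_addr.compare_exchange_weak(
            current,
            candidate,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Ok(_) => break,                      // we installed the new maximum
            Err(observed) => current = observed, // another thread raced us; retry
        }
    }
}

fn main() {
    let max_addr = AtomicUsize::new(0x1000);
    update_max(&max_addr, 0x2000);
    assert_eq!(max_addr.load(Ordering::Relaxed), 0x2000);
}
```

The standard library's `AtomicUsize::fetch_max` expresses the same idea in one call; the explicit loop is shown here only to make the retry structure visible.
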
21 changes: 13 additions & 8 deletions src/policy/marksweepspace/malloc_ms/metadata.rs
@@ -76,7 +76,7 @@ fn is_meta_space_mapped_for_address(address: Address) -> bool {
}

/// Eagerly map the active chunk metadata surrounding `chunk_start`
fn map_active_chunk_metadata(chunk_start: Address) {
fn map_active_chunk_metadata(chunk_start: Address, space_name: &str) {
debug_assert!(chunk_start.is_aligned_to(BYTES_IN_CHUNK));
// We eagerly map 16Gb worth of space for the chunk mark bytes on 64-bits
// We require saturating subtractions in order to not overflow the chunk_start by
@@ -99,16 +99,20 @@
chunk_start + (size / 2)
);

assert!(
CHUNK_METADATA.try_map_metadata_space(start, size).is_ok(),
"failed to mmap meta memory"
);
CHUNK_METADATA
.try_map_metadata_space(start, size, space_name)
Reviewer comment (Member): This is good refactoring with a clear improvement. The old code put the action actually performed inside the assert!, which is bad.

.unwrap_or_else(|e| panic!("failed to mmap meta memory: {e}"));
}

/// We map the active chunk metadata (if not previously mapped), as well as the VO bit metadata
/// and active page metadata here. Note that if [addr, addr + size) crosses multiple chunks, we
/// will map for each chunk.
pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size: usize) {
pub(super) fn map_meta_space(
metadata: &SideMetadataContext,
addr: Address,
size: usize,
space_name: &str,
) {
// In order to prevent race conditions, we synchronize on the lock first and then
// check if we need to map the active chunk metadata for `chunk_start`
let _lock = CHUNK_MAP_LOCK.lock().unwrap();
@@ -118,7 +122,7 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size
// Check if the chunk bit metadata is mapped. If it is not mapped, map it.
// Note that the chunk bit metadata is global. It may have been mapped because other policy mapped it.
if !is_chunk_mapped(start) {
map_active_chunk_metadata(start);
map_active_chunk_metadata(start, space_name);
}

// If we have set the chunk bit, return. This is needed just in case another thread has done this before
@@ -131,7 +135,8 @@ pub(super) fn map_meta_space(metadata: &SideMetadataContext, addr: Address, size
// Note that this might fail. For example, we have marked a chunk as active but later we freed all
// the objects in it, and unset its chunk bit. However, we do not free its metadata. So for the chunk,
// its chunk bit is mapped, but not marked, and all its local metadata is also mapped.
let mmap_metadata_result = metadata.try_map_metadata_space(start, BYTES_IN_CHUNK);
let mmap_metadata_result =
metadata.try_map_metadata_space(start, BYTES_IN_CHUNK, space_name);
debug_assert!(
mmap_metadata_result.is_ok(),
"mmap sidemetadata failed for chunk_start ({})",
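
To illustrate the reviewer comment earlier in this file about keeping the real work out of `assert!`: if the mapping call lives inside the assertion, a later switch to `debug_assert!` (or compiling assertions out) would silently skip the mmap, and the error value is discarded either way. A small before/after sketch with a stand-in `try_map` function (hypothetical, for illustration only):

```rust
use std::io;

fn try_map(start: usize, size: usize) -> io::Result<()> {
    // Stand-in for try_map_metadata_space.
    let _ = (start, size);
    Ok(())
}

fn before(start: usize, size: usize) {
    // The side effect is buried in the assertion and the error is thrown away.
    assert!(try_map(start, size).is_ok(), "failed to mmap meta memory");
}

fn after(start: usize, size: usize) {
    // The action is explicit and the error becomes part of the panic message.
    try_map(start, size).unwrap_or_else(|e| panic!("failed to mmap meta memory: {e}"));
}

fn main() {
    before(0x1000, 4096);
    after(0x1000, 4096);
}
```
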
8 changes: 5 additions & 3 deletions src/policy/sft_map.rs
@@ -378,9 +378,11 @@ mod dense_chunk_map {
global: vec![SFT_DENSE_CHUNK_MAP_INDEX],
local: vec![],
};
if context.try_map_metadata_space(start, bytes).is_err() {
panic!("failed to mmap metadata memory");
}
context
.try_map_metadata_space(start, bytes, "SFTDenseChunkMap")
.unwrap_or_else(|e| {
panic!("failed to mmap metadata memory: {e}");
});

self.update(space, start, bytes);
}
45 changes: 24 additions & 21 deletions src/policy/space.rs
@@ -144,12 +144,19 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
if let Err(mmap_error) = self
.common()
.mmapper
.ensure_mapped(res.start, res.pages, self.common().mmap_strategy())
.and(
self.common()
.metadata
.try_map_metadata_space(res.start, bytes),
.ensure_mapped(
res.start,
res.pages,
self.common().mmap_strategy(),
&memory::MmapAnnotation::Space {
name: self.get_name(),
},
)
.and(self.common().metadata.try_map_metadata_space(
res.start,
bytes,
self.name(),
))
{
memory::handle_mmap_error::<VM>(mmap_error, tls, res.start, bytes);
}
@@ -293,15 +300,13 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
/// Ensure this space is marked as mapped -- used when the space is already
/// mapped (e.g. for a vm image which is externally mmapped.)
fn ensure_mapped(&self) {
if self
.common()
self.common()
.metadata
.try_map_metadata_space(self.common().start, self.common().extent)
.is_err()
{
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory");
}
.try_map_metadata_space(self.common().start, self.common().extent, self.name())
.unwrap_or_else(|e| {
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory: {e}");
});

self.common()
.mmapper
@@ -609,14 +614,12 @@ impl<VM: VMBinding> CommonSpace<VM> {
}

// For contiguous space, we know its address range so we reserve metadata memory for its range.
if rtn
.metadata
.try_map_metadata_address_range(rtn.start, rtn.extent)
.is_err()
{
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory");
}
rtn.metadata
.try_map_metadata_address_range(rtn.start, rtn.extent, rtn.name)
.unwrap_or_else(|e| {
// TODO(Javad): handle meta space allocation failure
panic!("failed to mmap meta memory: {e}");
});

debug!(
"Created space {} [{}, {}) for {} bytes",
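
One subtlety in the `ensure_mapped(...).and(try_map_metadata_space(...))` chain earlier in this file: `Result::and` takes an already computed value, so the metadata mapping is attempted regardless of whether the first call succeeded, and when both fail the first error is the one reported. The sketch below (illustrative, not MMTk code) contrasts that with the lazy `and_then`:

```rust
fn step(label: &str, ok: bool) -> Result<(), String> {
    println!("running {label}");
    if ok { Ok(()) } else { Err(format!("{label} failed")) }
}

fn main() {
    // `and` evaluates its argument eagerly: "second" runs even though "first" fails,
    // and the combined result carries the first error.
    let eager = step("first", false).and(step("second", true));
    println!("{eager:?}");

    // `and_then` is lazy: the closure never runs once "first" has failed.
    let lazy = step("first", false).and_then(|_| step("second", true));
    println!("{lazy:?}");
}
```
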
2 changes: 1 addition & 1 deletion src/policy/vmspace.rs
@@ -229,7 +229,7 @@ impl<VM: VMBinding> VMSpace<VM> {
// Map side metadata
self.common
.metadata
.try_map_metadata_space(chunk_start, chunk_size)
.try_map_metadata_space(chunk_start, chunk_size, self.get_name())
.unwrap();
// Insert to vm map: it would be good if we can make VM map aware of the region. However, the region may be outside what we can map in our VM map implementation.
// self.common.vm_map.insert(chunk_start, chunk_size, self.common.descriptor);
44 changes: 35 additions & 9 deletions src/util/heap/layout/byte_map_mmapper.rs
@@ -1,5 +1,6 @@
use super::mmapper::MapState;
use super::Mmapper;
use crate::util::memory::MmapAnnotation;
use crate::util::Address;

use crate::util::constants::*;
@@ -44,7 +45,13 @@ impl Mmapper for ByteMapMmapper {
}
}

fn ensure_mapped(&self, start: Address, pages: usize, strategy: MmapStrategy) -> Result<()> {
fn ensure_mapped(
&self,
start: Address,
pages: usize,
strategy: MmapStrategy,
anno: &MmapAnnotation,
) -> Result<()> {
let start_chunk = Self::address_to_mmap_chunks_down(start);
let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
trace!(
@@ -62,7 +69,8 @@ impl Mmapper for ByteMapMmapper {

let mmap_start = Self::mmap_chunks_to_address(chunk);
let _guard = self.lock.lock().unwrap();
MapState::transition_to_mapped(&self.mapped[chunk], mmap_start, strategy).unwrap();
MapState::transition_to_mapped(&self.mapped[chunk], mmap_start, strategy, anno)
.unwrap();
}

Ok(())
@@ -73,6 +81,7 @@
start: Address,
pages: usize,
strategy: MmapStrategy,
anno: &MmapAnnotation,
) -> Result<()> {
let start_chunk = Self::address_to_mmap_chunks_down(start);
let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
@@ -91,7 +100,8 @@

let mmap_start = Self::mmap_chunks_to_address(chunk);
let _guard = self.lock.lock().unwrap();
MapState::transition_to_quarantined(&self.mapped[chunk], mmap_start, strategy).unwrap();
MapState::transition_to_quarantined(&self.mapped[chunk], mmap_start, strategy, anno)
.unwrap();
}

Ok(())
@@ -172,6 +182,7 @@ impl Default for ByteMapMmapper {
#[cfg(test)]
mod tests {
use super::ByteMapMmapper;
use crate::mmap_anno_test;
use crate::util::heap::layout::Mmapper;
use crate::util::Address;

@@ -237,7 +248,7 @@ mod tests {
|| {
let mmapper = ByteMapMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST)
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
.unwrap();

for chunk in start_chunk..end_chunk {
@@ -266,7 +277,7 @@ mod tests {
|| {
let mmapper = ByteMapMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST)
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
.unwrap();

for chunk in start_chunk..end_chunk {
@@ -295,7 +306,7 @@ mod tests {
|| {
let mmapper = ByteMapMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST)
.ensure_mapped(FIXED_ADDRESS, pages, MmapStrategy::TEST, mmap_anno_test!())
.unwrap();

let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
@@ -329,7 +340,12 @@ mod tests {
// map 2 chunks
let mmapper = ByteMapMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST)
.ensure_mapped(
FIXED_ADDRESS,
test_memory_pages,
MmapStrategy::TEST,
mmap_anno_test!(),
)
.unwrap();

// protect 1 chunk
@@ -364,7 +380,12 @@ mod tests {
// map 2 chunks
let mmapper = ByteMapMmapper::new();
mmapper
.ensure_mapped(FIXED_ADDRESS, test_memory_pages, MmapStrategy::TEST)
.ensure_mapped(
FIXED_ADDRESS,
test_memory_pages,
MmapStrategy::TEST,
mmap_anno_test!(),
)
.unwrap();

// protect 1 chunk
@@ -382,7 +403,12 @@

// ensure mapped - this will unprotect the previously protected chunk
mmapper
.ensure_mapped(FIXED_ADDRESS, protect_memory_pages_2, MmapStrategy::TEST)
.ensure_mapped(
FIXED_ADDRESS,
protect_memory_pages_2,
MmapStrategy::TEST,
mmap_anno_test!(),
)
.unwrap();
assert_eq!(
mmapper.mapped[chunk].load(Ordering::Relaxed),
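
The tests above pass `mmap_anno_test!()` wherever an annotation is required. The macro's expansion is not shown in this diff; a plausible shape (purely hypothetical, including the `Test` variant name) would capture the call site so test mappings are easy to attribute in /proc/self/maps:

```rust
// Hypothetical sketch only: the real macro and enum live in util::memory
// and may differ from this.
#[allow(dead_code)]
pub enum MmapAnnotation<'a> {
    Space { name: &'a str },
    Test { file: &'a str, line: u32 },
}

macro_rules! mmap_anno_test {
    () => {
        &MmapAnnotation::Test {
            file: file!(),
            line: line!(),
        }
    };
}

fn main() {
    let anno = mmap_anno_test!();
    if let MmapAnnotation::Test { file, line } = anno {
        println!("annotation from {file}:{line}");
    }
}
```
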