Boot-time configurable heap constants (#899)
This CL refactors the `vm_layout_constants`-related code and changes all
of the related compile-time constants to run-time constants, so that VM
bindings can configure them at boot time based on dynamic user input.

This is a necessary refactoring to support compressed pointers. In most
VMs (e.g. OpenJDK), compressed pointers are enabled at run time based on
command-line arguments, so VM bindings must be able to enable a
compressed-pointer heap dynamically during the VM boot process. Doing so
usually involves dynamically configuring heap ranges and space memory
layout, and choosing a different `VMMap` implementation [1]. Thus all
`vm_layout_constants`, including the heap constants, should be made
boot-time configurable rather than compile-time constants.

This CL also makes RISC-V's 39-bit heap support easier, as it should now
only involve adding a new set of `vm_layout_constants`.

[1] This is already done by #732
wenyuzhao authored Aug 24, 2023
1 parent 244bb7b commit 039055a
Showing 47 changed files with 606 additions and 413 deletions.
7 changes: 3 additions & 4 deletions src/memory_manager.rs
@@ -19,8 +19,7 @@ use crate::scheduler::WorkBucketStage;
use crate::scheduler::{GCController, GCWork, GCWorker};
use crate::util::alloc::allocators::AllocatorSelector;
use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE};
use crate::util::heap::layout::vm_layout_constants::HEAP_END;
use crate::util::heap::layout::vm_layout_constants::HEAP_START;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::opaque_pointer::*;
use crate::util::{Address, ObjectReference};
use crate::vm::edge_shape::MemorySlice;
@@ -568,13 +567,13 @@ pub fn live_bytes_in_last_gc<VM: VMBinding>(mmtk: &MMTK<VM>) -> usize {
/// Return the starting address of the heap. *Note that currently MMTk uses
/// a fixed address range as heap.*
pub fn starting_heap_address() -> Address {
HEAP_START
vm_layout().heap_start
}

/// Return the ending address of the heap. *Note that currently MMTk uses
/// a fixed address range as heap.*
pub fn last_heap_address() -> Address {
HEAP_END
vm_layout().heap_end
}

/// Return the total memory in bytes.
7 changes: 7 additions & 0 deletions src/mmtk.rs
@@ -6,6 +6,7 @@ use crate::scheduler::GCWorkScheduler;
#[cfg(feature = "extreme_assertions")]
use crate::util::edge_logger::EdgeLogger;
use crate::util::finalizable_processor::FinalizableProcessor;
use crate::util::heap::layout::vm_layout::VMLayout;
use crate::util::heap::layout::{self, Mmapper, VMMap};
use crate::util::opaque_pointer::*;
use crate::util::options::Options;
@@ -65,6 +66,12 @@ impl MMTKBuilder {
self.options.set_bulk_from_command_line(options)
}

/// Custom VM layout constants. VM bindings may use this function to support compressed pointers or a 39-bit heap.
/// This function must be called before `MMTk::new()`.
pub fn set_vm_layout(&mut self, constants: VMLayout) {
VMLayout::set_custom_vm_layout(constants)
}

/// Build an MMTk instance from the builder.
pub fn build<VM: VMBinding>(&self) -> MMTK<VM> {
MMTK::new(Arc::new(self.options.clone()))
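
A minimal sketch of how a VM binding might use the `set_vm_layout` hook added above. Only `set_vm_layout` itself and the `VMLayout` fields that appear elsewhere in this diff (`heap_start`, `heap_end`, `log_space_extent`, `force_use_contiguous_spaces`) come from the commit; the concrete 39-bit values and the assumption that `VMLayout` implements `Default` are illustrative.

```rust
use mmtk::util::heap::layout::vm_layout::VMLayout;
use mmtk::util::Address;
use mmtk::MMTKBuilder;

fn boot_with_39_bit_heap() -> MMTKBuilder {
    let mut builder = MMTKBuilder::new();

    // Start from the default 64-bit layout and shrink it to a 39-bit address
    // space (e.g. RISC-V Sv39): 15 usable spaces of 2^35 bytes each.
    let mut layout = VMLayout::default(); // assumes a `Default` impl
    layout.heap_start = unsafe { Address::from_usize(1 << 35) };
    layout.heap_end = unsafe { Address::from_usize(1 << 39) };
    layout.log_space_extent = 35;
    layout.force_use_contiguous_spaces = true;

    // Must happen before `build()`, which calls `MMTk::new()`.
    builder.set_vm_layout(layout);
    builder
}
```

Since `set_vm_layout` forwards to the global `VMLayout::set_custom_vm_layout`, it should be called at most once, before the first `build()`.
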
6 changes: 3 additions & 3 deletions src/plan/generational/barrier.rs
@@ -4,7 +4,7 @@ use crate::plan::barriers::BarrierSemantics;
use crate::plan::PlanTraceObject;
use crate::plan::VectorQueue;
use crate::scheduler::WorkBucketStage;
use crate::util::constants::BYTES_IN_ADDRESS;
use crate::util::constants::BYTES_IN_INT;
use crate::util::*;
use crate::vm::edge_shape::MemorySlice;
use crate::vm::VMBinding;
@@ -91,9 +91,9 @@ impl<VM: VMBinding, P: GenerationalPlanExt<VM> + PlanTraceObject<VM>> BarrierSem
if !dst_in_nursery {
// enqueue
debug_assert_eq!(
dst.bytes() & (BYTES_IN_ADDRESS - 1),
dst.bytes() & (BYTES_IN_INT - 1),
0,
"bytes should be a multiple of words"
"bytes should be a multiple of 32-bit words"
);
self.region_modbuf.push(dst);
self.region_modbuf
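
The assertion above uses the standard power-of-two trick: when `n` is a power of two, `x & (n - 1) == 0` holds exactly when `x` is a multiple of `n`; here `n` is `BYTES_IN_INT` (4), presumably loosened from `BYTES_IN_ADDRESS` so that slices of 4-byte compressed-pointer edges still pass. A standalone sketch (the helper name is ours):

```rust
/// For a power-of-two `n`, `x` is a multiple of `n` iff its low log2(n) bits are zero.
fn is_multiple_of_pow2(x: usize, n: usize) -> bool {
    debug_assert!(n.is_power_of_two());
    x & (n - 1) == 0
}

fn main() {
    const BYTES_IN_INT: usize = 4; // the 32-bit word size used in the new assertion
    assert!(is_multiple_of_pow2(12, BYTES_IN_INT));
    assert!(!is_multiple_of_pow2(14, BYTES_IN_INT));
}
```
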
2 changes: 1 addition & 1 deletion src/policy/copyspace.rs
@@ -6,7 +6,7 @@ use crate::policy::space::{CommonSpace, Space};
use crate::scheduler::GCWorker;
use crate::util::copy::*;
#[cfg(feature = "vo_bit")]
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::{MonotonePageResource, PageResource};
use crate::util::metadata::{extract_side_metadata, MetadataSpec};
use crate::util::object_forwarding;
17 changes: 9 additions & 8 deletions src/policy/lockfreeimmortalspace.rs
@@ -10,7 +10,7 @@ use crate::policy::space::{CommonSpace, Space};
use crate::util::address::Address;

use crate::util::conversions;
use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_BYTES, AVAILABLE_START};
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::PageResource;
use crate::util::memory::MmapStrategy;
use crate::util::metadata::side_metadata::SideMetadataContext;
@@ -178,19 +178,19 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
_ => unimplemented!(),
};
assert!(
total_bytes <= AVAILABLE_BYTES,
total_bytes <= vm_layout().available_bytes(),
"Initial requested memory ({} bytes) overflows the heap. Max heap size is {} bytes.",
total_bytes,
AVAILABLE_BYTES
vm_layout().available_bytes()
);

// FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
// https://github.com/mmtk/mmtk-core/issues/314
let space = Self {
name: args.name,
cursor: Atomic::new(AVAILABLE_START),
limit: AVAILABLE_START + total_bytes,
start: AVAILABLE_START,
cursor: Atomic::new(vm_layout().available_start()),
limit: vm_layout().available_start() + total_bytes,
start: vm_layout().available_start(),
extent: total_bytes,
slow_path_zeroing,
metadata: SideMetadataContext {
@@ -206,10 +206,11 @@ impl<VM: VMBinding> LockFreeImmortalSpace<VM> {
} else {
MmapStrategy::Normal
};
crate::util::memory::dzmmap_noreplace(AVAILABLE_START, total_bytes, strategy).unwrap();
crate::util::memory::dzmmap_noreplace(vm_layout().available_start(), total_bytes, strategy)
.unwrap();
if space
.metadata
.try_map_metadata_space(AVAILABLE_START, total_bytes)
.try_map_metadata_space(vm_layout().available_start(), total_bytes)
.is_err()
{
// TODO(Javad): handle meta space allocation failure
2 changes: 1 addition & 1 deletion src/policy/marksweepspace/malloc_ms/global.rs
@@ -21,7 +21,7 @@ use crate::util::ObjectReference;
use crate::util::{conversions, metadata};
use crate::vm::VMBinding;
use crate::vm::{ActivePlan, Collection, ObjectModel};
use crate::{policy::space::Space, util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK};
use crate::{policy::space::Space, util::heap::layout::vm_layout::BYTES_IN_CHUNK};
#[cfg(debug_assertions)]
use std::collections::HashMap;
use std::marker::PhantomData;
2 changes: 1 addition & 1 deletion src/policy/marksweepspace/malloc_ms/metadata.rs
@@ -1,5 +1,5 @@
use crate::util::conversions;
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::metadata::side_metadata;
use crate::util::metadata::side_metadata::SideMetadataContext;
use crate::util::metadata::side_metadata::SideMetadataSpec;
65 changes: 29 additions & 36 deletions src/policy/sft_map.rs
@@ -66,7 +66,12 @@ pub(crate) fn create_sft_map() -> Box<dyn SFTMap> {
// 64-bit malloc mark sweep needs a chunk-based SFT map, but the sparse map is not suitable for 64bits.
Box::new(dense_chunk_map::SFTDenseChunkMap::new())
} else if #[cfg(target_pointer_width = "64")] {
Box::new(space_map::SFTSpaceMap::new())
use crate::util::heap::layout::vm_layout::vm_layout;
if vm_layout().force_use_contiguous_spaces {
Box::new(space_map::SFTSpaceMap::new())
} else {
Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
}
} else if #[cfg(target_pointer_width = "32")] {
Box::new(sparse_chunk_map::SFTSparseChunkMap::new())
} else {
Expand All @@ -79,9 +84,7 @@ pub(crate) fn create_sft_map() -> Box<dyn SFTMap> {
#[cfg(target_pointer_width = "64")] // This impl only works for 64 bits: 1. the mask is designed for our 64bit heap range, 2. on 64bits, all our spaces are contiguous.
mod space_map {
use super::*;
use crate::util::heap::layout::vm_layout_constants::{
HEAP_START, LOG_SPACE_EXTENT, MAX_SPACE_EXTENT,
};
use crate::util::heap::layout::vm_layout::vm_layout;
use std::cell::UnsafeCell;

/// Space map is a small table, and it has one entry for each MMTk space.
@@ -118,6 +121,7 @@ mod space_map {
start: Address,
bytes: usize,
) {
let table_size = Self::addr_to_index(Address::MAX) + 1;
let index = Self::addr_to_index(start);
if cfg!(debug_assertions) {
// Make sure we only update from empty to a valid space, or overwrite the space
@@ -128,9 +132,9 @@
// FIXME: Currently skip the check for the last space. The following works fine for MMTk internal spaces,
// but the VM space is an exception. Any address after the last space is considered as the last space,
// based on our indexing function. In that case, we cannot assume the end of the region is within the last space (with MAX_SPACE_EXTENT).
if index != Self::TABLE_SIZE - 1 {
if index != table_size - 1 {
assert!(start >= space_start);
assert!(start + bytes <= space_start + MAX_SPACE_EXTENT);
assert!(start + bytes <= space_start + vm_layout().max_space_extent());
}
}

@@ -144,43 +148,32 @@
}

impl SFTSpaceMap {
/// This mask extracts a few bits from address, and use it as index to the space map table.
/// This constant is specially picked for the current heap range (HEAP_START/HEAP_END), and the space size (MAX_SPACE_EXTENT).
/// If any of these changes, the test `test_address_arithmetic()` may fail, and this constant will need to be updated.
/// Currently our spaces are using address range 0x0000_0200_0000_0000 to 0x0000_2200_0000_0000 (with a maximum of 16 spaces).
/// When masked with this constant, the index is 1 to 16. If we mask any arbitrary address with this mask, we will get 0 to 31 (32 entries).
pub const ADDRESS_MASK: usize = 0x0000_3f00_0000_0000usize;
/// The table size for the space map.
pub const TABLE_SIZE: usize = Self::addr_to_index(Address::MAX) + 1;

/// Create a new space map.
#[allow(clippy::assertions_on_constants)] // We assert to make sure the constants
pub fn new() -> Self {
debug_assert!(
Self::TABLE_SIZE >= crate::util::heap::layout::heap_parameters::MAX_SPACES
);
let table_size = Self::addr_to_index(Address::MAX) + 1;
debug_assert!(table_size >= crate::util::heap::layout::heap_parameters::MAX_SPACES);
Self {
sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; Self::TABLE_SIZE]),
sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; table_size]),
}
}

const fn addr_to_index(addr: Address) -> usize {
addr.and(Self::ADDRESS_MASK) >> LOG_SPACE_EXTENT
fn addr_to_index(addr: Address) -> usize {
addr.and(vm_layout().address_mask()) >> vm_layout().log_space_extent
}

const fn index_to_space_start(i: usize) -> Address {
fn index_to_space_start(i: usize) -> Address {
let (start, _) = Self::index_to_space_range(i);
start
}

const fn index_to_space_range(i: usize) -> (Address, Address) {
fn index_to_space_range(i: usize) -> (Address, Address) {
if i == 0 {
panic!("Invalid index: there is no space for index 0")
} else {
(
HEAP_START.add((i - 1) << LOG_SPACE_EXTENT),
HEAP_START.add(i << LOG_SPACE_EXTENT),
)
let start = Address::ZERO.add(i << vm_layout().log_space_extent);
let extent = 1 << vm_layout().log_space_extent;
(start, start.add(extent))
}
}
}
@@ -189,15 +182,15 @@
mod tests {
use super::*;
use crate::util::heap::layout::heap_parameters::MAX_SPACES;
use crate::util::heap::layout::vm_layout_constants::{HEAP_END, HEAP_START};
use crate::util::heap::layout::vm_layout::vm_layout;

// If the test `test_address_arithmetic()` fails, it is possibly due to a change of our heap range, max space extent, or max number of spaces.
// We need to update the code and the constants for the address arithmetic.
#[test]
fn test_address_arithmetic() {
// Before 1st space
assert_eq!(SFTSpaceMap::addr_to_index(Address::ZERO), 0);
assert_eq!(SFTSpaceMap::addr_to_index(HEAP_START - 1), 0);
assert_eq!(SFTSpaceMap::addr_to_index(vm_layout().heap_start - 1), 0);

let assert_for_index = |i: usize| {
let (start, end) = SFTSpaceMap::index_to_space_range(i);
@@ -214,8 +207,8 @@
// assert space end
let (_, last_space_end) = SFTSpaceMap::index_to_space_range(MAX_SPACES);
println!("Space end = {}", last_space_end);
println!("Heap end = {}", HEAP_END);
assert_eq!(last_space_end, HEAP_END);
println!("Heap end = {}", vm_layout().heap_end);
assert_eq!(last_space_end, vm_layout().heap_end);

// after last space
assert_eq!(SFTSpaceMap::addr_to_index(last_space_end), 17);
@@ -228,7 +221,7 @@
mod dense_chunk_map {
use super::*;
use crate::util::conversions;
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::metadata::side_metadata::spec_defs::SFT_DENSE_CHUNK_MAP_INDEX;
use crate::util::metadata::side_metadata::*;
use std::cell::UnsafeCell;
@@ -381,8 +374,8 @@ mod sparse_chunk_map {
use super::*;
use crate::util::conversions;
use crate::util::conversions::*;
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
use crate::util::heap::layout::vm_layout_constants::MAX_CHUNKS;
use crate::util::heap::layout::vm_layout::vm_layout;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;

/// The chunk map is a sparse table. It has one entry for each chunk in the address space we may use.
pub struct SFTSparseChunkMap {
Expand All @@ -393,7 +386,7 @@ mod sparse_chunk_map {

impl SFTMap for SFTSparseChunkMap {
fn has_sft_entry(&self, addr: Address) -> bool {
addr.chunk_index() < MAX_CHUNKS
addr.chunk_index() < vm_layout().max_chunks()
}

fn get_side_metadata(&self) -> Option<&SideMetadataSpec> {
@@ -453,7 +446,7 @@
impl SFTSparseChunkMap {
pub fn new() -> Self {
SFTSparseChunkMap {
sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; MAX_CHUNKS]),
sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; vm_layout().max_chunks()]),
}
}

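
To make the space-map indexing above concrete, here is a worked example using the default 64-bit values quoted in the doc comment this commit removes (`HEAP_START = 0x0000_0200_0000_0000`, mask `0x0000_3f00_0000_0000`, and the 2^41-byte space extent implied by 16 spaces over that range). These are the old defaults only; a custom `VMLayout` changes all three.

```rust
const ADDRESS_MASK: usize = 0x0000_3f00_0000_0000;
const LOG_SPACE_EXTENT: usize = 41;
const HEAP_START: usize = 0x0000_0200_0000_0000; // 1 << 41

// Mirrors SFTSpaceMap::addr_to_index, on raw usizes.
fn addr_to_index(addr: usize) -> usize {
    (addr & ADDRESS_MASK) >> LOG_SPACE_EXTENT
}

fn main() {
    assert_eq!(addr_to_index(0), 0);              // below the heap
    assert_eq!(addr_to_index(HEAP_START - 1), 0); // still below the heap
    assert_eq!(addr_to_index(HEAP_START), 1);     // first space
    // The 16th (last) space starts at HEAP_START + 15 * 2^41 = 16 << 41.
    assert_eq!(addr_to_index(HEAP_START + (15 << LOG_SPACE_EXTENT)), 16);
    // One past the last space, as in test_address_arithmetic above.
    assert_eq!(addr_to_index(17 << LOG_SPACE_EXTENT), 17);
}
```
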
13 changes: 6 additions & 7 deletions src/policy/space.rs
@@ -7,8 +7,7 @@ use crate::util::metadata::side_metadata::{
use crate::util::Address;
use crate::util::ObjectReference;

use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_BYTES, LOG_BYTES_IN_CHUNK};
use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_END, AVAILABLE_START};
use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK};
use crate::util::heap::{PageResource, VMRequest};
use crate::util::options::Options;
use crate::vm::{ActivePlan, Collection};
@@ -23,7 +22,7 @@ use crate::policy::sft::EMPTY_SFT_NAME;
use crate::policy::sft::SFT;
use crate::util::copy::*;
use crate::util::heap::gc_trigger::GCTrigger;
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;
use crate::util::heap::layout::Mmapper;
use crate::util::heap::layout::VMMap;
use crate::util::heap::space_descriptor::SpaceDescriptor;
@@ -613,10 +612,10 @@ impl<VM: VMBinding> CommonSpace<VM> {
}

fn get_frac_available(frac: f32) -> usize {
trace!("AVAILABLE_START={}", AVAILABLE_START);
trace!("AVAILABLE_END={}", AVAILABLE_END);
let bytes = (frac * AVAILABLE_BYTES as f32) as usize;
trace!("bytes={}*{}={}", frac, AVAILABLE_BYTES, bytes);
trace!("AVAILABLE_START={}", vm_layout().available_start());
trace!("AVAILABLE_END={}", vm_layout().available_end());
let bytes = (frac * vm_layout().available_bytes() as f32) as usize;
trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes);
let mb = bytes >> LOG_BYTES_IN_MBYTE;
let rtn = mb << LOG_BYTES_IN_MBYTE;
trace!("rtn={}", rtn);
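
The rounding in `get_frac_available` above is simple enough to check standalone: take a fraction of the available bytes, then round down to a whole number of megabytes by shifting. A sketch; the 4 GiB figure is illustrative, not the real layout's `available_bytes()`.

```rust
const LOG_BYTES_IN_MBYTE: usize = 20;

// Mirrors get_frac_available, with available_bytes passed in explicitly.
fn get_frac_available(frac: f32, available_bytes: usize) -> usize {
    let bytes = (frac * available_bytes as f32) as usize;
    let mb = bytes >> LOG_BYTES_IN_MBYTE;
    mb << LOG_BYTES_IN_MBYTE // round down to MB granularity
}

fn main() {
    let bytes = get_frac_available(0.7, 4usize << 30); // 70% of 4 GiB
    assert_eq!(bytes % (1usize << LOG_BYTES_IN_MBYTE), 0);
    println!("{} bytes ({} MB)", bytes, bytes >> LOG_BYTES_IN_MBYTE);
}
```
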
2 changes: 1 addition & 1 deletion src/policy/vmspace.rs
@@ -145,7 +145,7 @@ impl<VM: VMBinding> VMSpace<VM> {
args: &mut CreateSpecificPlanArgs<VM>,
location: Option<(Address, usize)>,
) -> ImmortalSpace<VM> {
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK;

// If the location of the VM space is not supplied, find them in the options.
let (vm_space_start, vm_space_bytes) = location.unwrap_or((
2 changes: 1 addition & 1 deletion src/util/address.rs
@@ -277,7 +277,7 @@ impl Address {
}

/// is this address aligned to the given alignment
pub fn is_aligned_to(self, align: usize) -> bool {
pub const fn is_aligned_to(self, align: usize) -> bool {
use crate::util::conversions;
conversions::raw_is_aligned(self.0, align)
}
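
Making `is_aligned_to` a `const fn` (which requires `conversions::raw_is_aligned` to be `const` as well) lets callers check alignment at compile time. A sketch of the pattern this enables; the compile-time assertion is our illustration, not code from this commit:

```rust
use mmtk::util::Address;

const PAGE_SIZE: usize = 4096;

// Evaluated at compile time; the build fails if the address is misaligned.
const _: () = assert!(Address::ZERO.is_aligned_to(PAGE_SIZE));
```
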
3 changes: 3 additions & 0 deletions src/util/constants.rs
@@ -16,6 +16,9 @@ pub const BYTES_IN_BYTE: usize = 1;
pub const LOG_BITS_IN_BYTE: u8 = 3;
pub const BITS_IN_BYTE: usize = 1 << LOG_BITS_IN_BYTE;

pub const LOG_BYTES_IN_GBYTE: u8 = 30;
pub const BYTES_IN_GBYTE: usize = 1 << LOG_BYTES_IN_GBYTE;

pub const LOG_BYTES_IN_MBYTE: u8 = 20;
pub const BYTES_IN_MBYTE: usize = 1 << LOG_BYTES_IN_MBYTE;

2 changes: 1 addition & 1 deletion src/util/conversions.rs
@@ -1,5 +1,5 @@
use crate::util::constants::*;
use crate::util::heap::layout::vm_layout_constants::*;
use crate::util::heap::layout::vm_layout::*;
use crate::util::Address;

/* Alignment */
3 changes: 1 addition & 2 deletions src/util/heap/blockpageresource.rs
@@ -2,7 +2,7 @@ use super::pageresource::{PRAllocFail, PRAllocResult};
use super::{FreeListPageResource, PageResource};
use crate::util::address::Address;
use crate::util::constants::*;
use crate::util::heap::layout::vm_layout_constants::*;
use crate::util::heap::layout::vm_layout::*;
use crate::util::heap::layout::VMMap;
use crate::util::heap::pageresource::CommonPageResource;
use crate::util::heap::space_descriptor::SpaceDescriptor;
@@ -161,7 +161,6 @@ impl<VM: VMBinding, B: Region> BlockPageResource<VM, B> {
}

pub fn release_block(&self, block: B) {
debug_assert!(self.common().contiguous);
let pages = 1 << Self::LOG_PAGES;
debug_assert!(pages as usize <= self.common().accounting.get_committed_pages());
self.common().accounting.release(pages as _);