diff --git a/src/memory_manager.rs b/src/memory_manager.rs index be0a5e627c..6327207601 100644 --- a/src/memory_manager.rs +++ b/src/memory_manager.rs @@ -19,8 +19,7 @@ use crate::scheduler::WorkBucketStage; use crate::scheduler::{GCController, GCWork, GCWorker}; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE}; -use crate::util::heap::layout::vm_layout_constants::HEAP_END; -use crate::util::heap::layout::vm_layout_constants::HEAP_START; +use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::opaque_pointer::*; use crate::util::{Address, ObjectReference}; use crate::vm::edge_shape::MemorySlice; @@ -568,13 +567,13 @@ pub fn live_bytes_in_last_gc(mmtk: &MMTK) -> usize { /// Return the starting address of the heap. *Note that currently MMTk uses /// a fixed address range as heap.* pub fn starting_heap_address() -> Address { - HEAP_START + vm_layout().heap_start } /// Return the ending address of the heap. *Note that currently MMTk uses /// a fixed address range as heap.* pub fn last_heap_address() -> Address { - HEAP_END + vm_layout().heap_end } /// Return the total memory in bytes. diff --git a/src/mmtk.rs b/src/mmtk.rs index 9174d9a4e4..687b6894fa 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -6,6 +6,7 @@ use crate::scheduler::GCWorkScheduler; #[cfg(feature = "extreme_assertions")] use crate::util::edge_logger::EdgeLogger; use crate::util::finalizable_processor::FinalizableProcessor; +use crate::util::heap::layout::vm_layout::VMLayout; use crate::util::heap::layout::{self, Mmapper, VMMap}; use crate::util::opaque_pointer::*; use crate::util::options::Options; @@ -65,6 +66,12 @@ impl MMTKBuilder { self.options.set_bulk_from_command_line(options) } + /// Custom VM layout constants. VM bindings may use this function for compressed or 39-bit heap support. + /// This function must be called before MMTk::new() + pub fn set_vm_layout(&mut self, constants: VMLayout) { + VMLayout::set_custom_vm_layout(constants) + } + /// Build an MMTk instance from the builder. 
pub fn build<VM: VMBinding>(&self) -> MMTK<VM> { MMTK::new(Arc::new(self.options.clone())) diff --git a/src/plan/generational/barrier.rs b/src/plan/generational/barrier.rs index dc4ae7b74b..1fe17c93b0 100644 --- a/src/plan/generational/barrier.rs +++ b/src/plan/generational/barrier.rs @@ -4,7 +4,7 @@ use crate::plan::barriers::BarrierSemantics; use crate::plan::PlanTraceObject; use crate::plan::VectorQueue; use crate::scheduler::WorkBucketStage; -use crate::util::constants::BYTES_IN_ADDRESS; +use crate::util::constants::BYTES_IN_INT; use crate::util::*; use crate::vm::edge_shape::MemorySlice; use crate::vm::VMBinding; @@ -91,9 +91,9 @@ impl<VM: VMBinding, P: GenerationalPlanExt<VM> + PlanTraceObject<VM>> BarrierSem if !dst_in_nursery { // enqueue debug_assert_eq!( - dst.bytes() & (BYTES_IN_ADDRESS - 1), + dst.bytes() & (BYTES_IN_INT - 1), 0, - "bytes should be a multiple of words" + "bytes should be a multiple of 32-bit words" ); self.region_modbuf.push(dst); self.region_modbuf diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 11bc3baace..5e1ff0cbc2 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -6,7 +6,7 @@ use crate::policy::space::{CommonSpace, Space}; use crate::scheduler::GCWorker; use crate::util::copy::*; #[cfg(feature = "vo_bit")] -use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; +use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::heap::{MonotonePageResource, PageResource}; use crate::util::metadata::{extract_side_metadata, MetadataSpec}; use crate::util::object_forwarding; diff --git a/src/policy/lockfreeimmortalspace.rs b/src/policy/lockfreeimmortalspace.rs index ea7167c799..ba1dc6fff4 100644 --- a/src/policy/lockfreeimmortalspace.rs +++ b/src/policy/lockfreeimmortalspace.rs @@ -10,7 +10,7 @@ use crate::policy::space::{CommonSpace, Space}; use crate::util::address::Address; use crate::util::conversions; -use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_BYTES, AVAILABLE_START}; +use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::heap::PageResource; use crate::util::memory::MmapStrategy; use crate::util::metadata::side_metadata::SideMetadataContext; @@ -178,19 +178,19 @@ impl LockFreeImmortalSpace { _ => unimplemented!(), }; assert!( - total_bytes <= AVAILABLE_BYTES, + total_bytes <= vm_layout().available_bytes(), "Initial requested memory ({} bytes) overflows the heap. Max heap size is {} bytes.", total_bytes, - AVAILABLE_BYTES + vm_layout().available_bytes() ); // FIXME: This space assumes that it can use the entire heap range, which is definitely wrong.
// https://github.com/mmtk/mmtk-core/issues/314 let space = Self { name: args.name, - cursor: Atomic::new(AVAILABLE_START), - limit: AVAILABLE_START + total_bytes, - start: AVAILABLE_START, + cursor: Atomic::new(vm_layout().available_start()), + limit: vm_layout().available_start() + total_bytes, + start: vm_layout().available_start(), extent: total_bytes, slow_path_zeroing, metadata: SideMetadataContext { @@ -206,10 +206,11 @@ } else { MmapStrategy::Normal }; - crate::util::memory::dzmmap_noreplace(AVAILABLE_START, total_bytes, strategy).unwrap(); + crate::util::memory::dzmmap_noreplace(vm_layout().available_start(), total_bytes, strategy) + .unwrap(); if space .metadata - .try_map_metadata_space(AVAILABLE_START, total_bytes) + .try_map_metadata_space(vm_layout().available_start(), total_bytes) .is_err() { // TODO(Javad): handle meta space allocation failure diff --git a/src/policy/marksweepspace/malloc_ms/global.rs b/src/policy/marksweepspace/malloc_ms/global.rs index 656faadd55..9b9a3eeb74 100644 --- a/src/policy/marksweepspace/malloc_ms/global.rs +++ b/src/policy/marksweepspace/malloc_ms/global.rs @@ -21,7 +21,7 @@ use crate::util::ObjectReference; use crate::util::{conversions, metadata}; use crate::vm::VMBinding; use crate::vm::{ActivePlan, Collection, ObjectModel}; -use crate::{policy::space::Space, util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK}; +use crate::{policy::space::Space, util::heap::layout::vm_layout::BYTES_IN_CHUNK}; #[cfg(debug_assertions)] use std::collections::HashMap; use std::marker::PhantomData; diff --git a/src/policy/marksweepspace/malloc_ms/metadata.rs b/src/policy/marksweepspace/malloc_ms/metadata.rs index b6ef3e82e6..34a1cd128a 100644 --- a/src/policy/marksweepspace/malloc_ms/metadata.rs +++ b/src/policy/marksweepspace/malloc_ms/metadata.rs @@ -1,5 +1,5 @@ use crate::util::conversions; -use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; +use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::metadata::side_metadata; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; diff --git a/src/policy/sft_map.rs b/src/policy/sft_map.rs index 12111eed6b..8df090a8e2 100644 --- a/src/policy/sft_map.rs +++ b/src/policy/sft_map.rs @@ -66,7 +66,12 @@ pub(crate) fn create_sft_map() -> Box<dyn SFTMap> { // 64-bit malloc mark sweep needs a chunk-based SFT map, but the sparse map is not suitable for 64bits. Box::new(dense_chunk_map::SFTDenseChunkMap::new()) } else if #[cfg(target_pointer_width = "64")] { - Box::new(space_map::SFTSpaceMap::new()) + use crate::util::heap::layout::vm_layout::vm_layout; + if vm_layout().force_use_contiguous_spaces { + Box::new(space_map::SFTSpaceMap::new()) + } else { + Box::new(sparse_chunk_map::SFTSparseChunkMap::new()) + } } else if #[cfg(target_pointer_width = "32")] { Box::new(sparse_chunk_map::SFTSparseChunkMap::new()) } else { @@ -79,9 +84,7 @@ pub(crate) fn create_sft_map() -> Box<dyn SFTMap> { #[cfg(target_pointer_width = "64")] // This impl only works for 64 bits: 1. the mask is designed for our 64bit heap range, 2. on 64bits, all our spaces are contiguous. mod space_map { use super::*; - use crate::util::heap::layout::vm_layout_constants::{ - HEAP_START, LOG_SPACE_EXTENT, MAX_SPACE_EXTENT, - }; + use crate::util::heap::layout::vm_layout::vm_layout; use std::cell::UnsafeCell; /// Space map is a small table, and it has one entry for each MMTk space.
@@ -118,6 +121,7 @@ mod space_map { start: Address, bytes: usize, ) { + let table_size = Self::addr_to_index(Address::MAX) + 1; let index = Self::addr_to_index(start); if cfg!(debug_assertions) { // Make sure we only update from empty to a valid space, or overwrite the space // FIXME: Currently skip the check for the last space. The following works fine for MMTk internal spaces, // but the VM space is an exception. Any address after the last space is considered as the last space, // based on our indexing function. In that case, we cannot assume the end of the region is within the last space (with MAX_SPACE_EXTENT). - if index != Self::TABLE_SIZE - 1 { + if index != table_size - 1 { assert!(start >= space_start); - assert!(start + bytes <= space_start + MAX_SPACE_EXTENT); + assert!(start + bytes <= space_start + vm_layout().max_space_extent()); } } @@ -144,43 +148,32 @@ } impl SFTSpaceMap { - /// This mask extracts a few bits from address, and use it as index to the space map table. - /// This constant is specially picked for the current heap range (HEAP_START/HEAP_END), and the space size (MAX_SPACE_EXTENT). - /// If any of these changes, the test `test_address_arithmetic()` may fail, and this constant will need to be updated. - /// Currently our spaces are using address range 0x0000_0200_0000_0000 to 0x0000_2200_0000_0000 (with a maximum of 16 spaces). - /// When masked with this constant, the index is 1 to 16. If we mask any arbitrary address with this mask, we will get 0 to 31 (32 entries). - pub const ADDRESS_MASK: usize = 0x0000_3f00_0000_0000usize; - /// The table size for the space map. - pub const TABLE_SIZE: usize = Self::addr_to_index(Address::MAX) + 1; - /// Create a new space map. #[allow(clippy::assertions_on_constants)] // We assert to make sure the constants pub fn new() -> Self { - debug_assert!( - Self::TABLE_SIZE >= crate::util::heap::layout::heap_parameters::MAX_SPACES - ); + let table_size = Self::addr_to_index(Address::MAX) + 1; + debug_assert!(table_size >= crate::util::heap::layout::heap_parameters::MAX_SPACES); Self { - sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; Self::TABLE_SIZE]), + sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; table_size]), } } - const fn addr_to_index(addr: Address) -> usize { - addr.and(Self::ADDRESS_MASK) >> LOG_SPACE_EXTENT + fn addr_to_index(addr: Address) -> usize { + addr.and(vm_layout().address_mask()) >> vm_layout().log_space_extent } - const fn index_to_space_start(i: usize) -> Address { + fn index_to_space_start(i: usize) -> Address { let (start, _) = Self::index_to_space_range(i); start } - const fn index_to_space_range(i: usize) -> (Address, Address) { + fn index_to_space_range(i: usize) -> (Address, Address) { if i == 0 { panic!("Invalid index: there is no space for index 0") } else { - ( - HEAP_START.add((i - 1) << LOG_SPACE_EXTENT), - HEAP_START.add(i << LOG_SPACE_EXTENT), - ) + let start = Address::ZERO.add(i << vm_layout().log_space_extent); + let extent = 1 << vm_layout().log_space_extent; + (start, start.add(extent)) } } } @@ -189,7 +182,7 @@ mod space_map { mod tests { use super::*; use crate::util::heap::layout::heap_parameters::MAX_SPACES; - use crate::util::heap::layout::vm_layout_constants::{HEAP_END, HEAP_START}; + use crate::util::heap::layout::vm_layout::vm_layout; // If the test `test_address_arithmetic()` fails, it may be due to a change of our heap range, max space extent, or max number of spaces.
// We need to update the code and the constants for the address arithmetic. @@ -197,7 +190,7 @@ mod space_map { fn test_address_arithmetic() { // Before 1st space assert_eq!(SFTSpaceMap::addr_to_index(Address::ZERO), 0); - assert_eq!(SFTSpaceMap::addr_to_index(HEAP_START - 1), 0); + assert_eq!(SFTSpaceMap::addr_to_index(vm_layout().heap_start - 1), 0); let assert_for_index = |i: usize| { let (start, end) = SFTSpaceMap::index_to_space_range(i); @@ -214,8 +207,8 @@ // assert space end let (_, last_space_end) = SFTSpaceMap::index_to_space_range(MAX_SPACES); println!("Space end = {}", last_space_end); - println!("Heap end = {}", HEAP_END); - assert_eq!(last_space_end, HEAP_END); + println!("Heap end = {}", vm_layout().heap_end); + assert_eq!(last_space_end, vm_layout().heap_end); // after last space assert_eq!(SFTSpaceMap::addr_to_index(last_space_end), 17); @@ -228,7 +221,7 @@ mod dense_chunk_map { use super::*; use crate::util::conversions; - use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; + use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::metadata::side_metadata::spec_defs::SFT_DENSE_CHUNK_MAP_INDEX; use crate::util::metadata::side_metadata::*; use std::cell::UnsafeCell; @@ -381,8 +374,8 @@ mod sparse_chunk_map { use super::*; use crate::util::conversions; use crate::util::conversions::*; - use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; - use crate::util::heap::layout::vm_layout_constants::MAX_CHUNKS; + use crate::util::heap::layout::vm_layout::vm_layout; + use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; /// The chunk map is a sparse table. It has one entry for each chunk in the address space we may use. pub struct SFTSparseChunkMap { @@ -393,7 +386,7 @@ impl SFTMap for SFTSparseChunkMap { fn has_sft_entry(&self, addr: Address) -> bool { - addr.chunk_index() < MAX_CHUNKS + addr.chunk_index() < vm_layout().max_chunks() } fn get_side_metadata(&self) -> Option<&SideMetadataSpec> { @@ -453,7 +446,7 @@ impl SFTSparseChunkMap { pub fn new() -> Self { SFTSparseChunkMap { - sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; MAX_CHUNKS]), + sft: UnsafeCell::new(vec![&EMPTY_SPACE_SFT; vm_layout().max_chunks()]), } } diff --git a/src/policy/space.rs b/src/policy/space.rs index 2bd2df0e57..f8004fba41 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -7,8 +7,7 @@ use crate::util::metadata::side_metadata::{ use crate::util::Address; use crate::util::ObjectReference; -use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_BYTES, LOG_BYTES_IN_CHUNK}; -use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_END, AVAILABLE_START}; +use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK}; use crate::util::heap::{PageResource, VMRequest}; use crate::util::options::Options; use crate::vm::{ActivePlan, Collection}; @@ -23,7 +22,7 @@ use crate::policy::sft::EMPTY_SFT_NAME; use crate::policy::sft::SFT; use crate::util::copy::*; use crate::util::heap::gc_trigger::GCTrigger; -use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; +use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::heap::space_descriptor::SpaceDescriptor; @@ -613,10 +612,10 @@ impl CommonSpace { } fn get_frac_available(frac: f32) -> usize { - trace!("AVAILABLE_START={}", AVAILABLE_START); - trace!("AVAILABLE_END={}", AVAILABLE_END); - let bytes =
(frac * AVAILABLE_BYTES as f32) as usize; - trace!("bytes={}*{}={}", frac, AVAILABLE_BYTES, bytes); + trace!("AVAILABLE_START={}", vm_layout().available_start()); + trace!("AVAILABLE_END={}", vm_layout().available_end()); + let bytes = (frac * vm_layout().available_bytes() as f32) as usize; + trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); let mb = bytes >> LOG_BYTES_IN_MBYTE; let rtn = mb << LOG_BYTES_IN_MBYTE; trace!("rtn={}", rtn); diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index 387e0ef116..4d26460966 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -145,7 +145,7 @@ impl VMSpace { args: &mut CreateSpecificPlanArgs, location: Option<(Address, usize)>, ) -> ImmortalSpace { - use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; + use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; // If the location of the VM space is not supplied, find them in the options. let (vm_space_start, vm_space_bytes) = location.unwrap_or(( diff --git a/src/util/address.rs b/src/util/address.rs index abd3674de6..c67578c3ec 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -277,7 +277,7 @@ impl Address { } /// is this address aligned to the given alignment - pub fn is_aligned_to(self, align: usize) -> bool { + pub const fn is_aligned_to(self, align: usize) -> bool { use crate::util::conversions; conversions::raw_is_aligned(self.0, align) } diff --git a/src/util/constants.rs b/src/util/constants.rs index 20f36a12fb..fd30fd1830 100644 --- a/src/util/constants.rs +++ b/src/util/constants.rs @@ -16,6 +16,9 @@ pub const BYTES_IN_BYTE: usize = 1; pub const LOG_BITS_IN_BYTE: u8 = 3; pub const BITS_IN_BYTE: usize = 1 << LOG_BITS_IN_BYTE; +pub const LOG_BYTES_IN_GBYTE: u8 = 30; +pub const BYTES_IN_GBYTE: usize = 1 << LOG_BYTES_IN_GBYTE; + pub const LOG_BYTES_IN_MBYTE: u8 = 20; pub const BYTES_IN_MBYTE: usize = 1 << LOG_BYTES_IN_MBYTE; diff --git a/src/util/conversions.rs b/src/util/conversions.rs index 988b711a8a..5121bf05c1 100644 --- a/src/util/conversions.rs +++ b/src/util/conversions.rs @@ -1,5 +1,5 @@ use crate::util::constants::*; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::Address; /* Alignment */ diff --git a/src/util/heap/blockpageresource.rs b/src/util/heap/blockpageresource.rs index 5cf15eb9b1..5b3ffbc2f2 100644 --- a/src/util/heap/blockpageresource.rs +++ b/src/util/heap/blockpageresource.rs @@ -2,7 +2,7 @@ use super::pageresource::{PRAllocFail, PRAllocResult}; use super::{FreeListPageResource, PageResource}; use crate::util::address::Address; use crate::util::constants::*; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::heap::layout::VMMap; use crate::util::heap::pageresource::CommonPageResource; use crate::util::heap::space_descriptor::SpaceDescriptor; @@ -161,7 +161,6 @@ impl BlockPageResource { } pub fn release_block(&self, block: B) { - debug_assert!(self.common().contiguous); let pages = 1 << Self::LOG_PAGES; debug_assert!(pages as usize <= self.common().accounting.get_committed_pages()); self.common().accounting.release(pages as _); diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs index 72f978348a..26912b3f89 100644 --- a/src/util/heap/chunk_map.rs +++ b/src/util/heap/chunk_map.rs @@ -13,7 +13,7 @@ use std::ops::Range; pub struct Chunk(Address); impl Region for Chunk { - const LOG_BYTES: usize = 
crate::util::heap::layout::vm_layout_constants::LOG_BYTES_IN_CHUNK; + const LOG_BYTES: usize = crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK; fn from_aligned_address(address: Address) -> Self { debug_assert!(address.is_aligned_to(Self::BYTES)); diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 9340a293ff..12173e0add 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -1,7 +1,7 @@ use std::ops::{Deref, DerefMut}; use std::sync::{Mutex, MutexGuard}; -use super::layout::vm_layout_constants::{PAGES_IN_CHUNK, PAGES_IN_SPACE64}; +use super::layout::vm_layout::PAGES_IN_CHUNK; use super::layout::VMMap; use super::pageresource::{PRAllocFail, PRAllocResult}; use super::PageResource; @@ -11,7 +11,7 @@ use crate::util::alloc::embedded_meta_data::*; use crate::util::conversions; use crate::util::freelist; use crate::util::freelist::FreeList; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::heap::pageresource::CommonPageResource; use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::memory; @@ -82,7 +82,7 @@ impl PageResource for FreeListPageResource { .saturating_sub(self.common.vm_map.get_chunk_consumer_count()); rtn += chunks * PAGES_IN_CHUNK; } else if self.common.growable && cfg!(target_pointer_width = "64") { - rtn = PAGES_IN_SPACE64 - self.reserved_pages(); + rtn = vm_layout().pages_in_space64() - self.reserved_pages(); } rtn @@ -177,7 +177,7 @@ impl FreeListPageResource { pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self { let common_flpr = { - let start = AVAILABLE_START; + let start = vm_layout().available_start(); let common_flpr = Box::new(CommonFreeListPageResource { free_list: vm_map.create_freelist(start), start, diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 2c7c84e18b..179d95a904 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -1,4 +1,4 @@ -use crate::util::heap::layout::vm_layout_constants::{HEAP_END, HEAP_START}; +use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::Address; pub struct HeapMeta { @@ -9,8 +9,8 @@ pub struct HeapMeta { impl HeapMeta { pub fn new() -> Self { HeapMeta { - heap_cursor: HEAP_START, - heap_limit: HEAP_END, + heap_cursor: vm_layout().heap_start, + heap_limit: vm_layout().heap_end, } } diff --git a/src/util/heap/layout/byte_map_mmapper.rs b/src/util/heap/layout/byte_map_mmapper.rs index 147c24439c..b3b70b6d6f 100644 --- a/src/util/heap/layout/byte_map_mmapper.rs +++ b/src/util/heap/layout/byte_map_mmapper.rs @@ -4,7 +4,7 @@ use crate::util::Address; use crate::util::constants::*; use crate::util::conversions::pages_to_bytes; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::memory::MmapStrategy; use std::fmt; use std::sync::atomic::Ordering; @@ -179,7 +179,7 @@ mod tests { use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::conversions::pages_to_bytes; use crate::util::heap::layout::mmapper::MapState; - use crate::util::heap::layout::vm_layout_constants::MMAP_CHUNK_BYTES; + use crate::util::heap::layout::vm_layout::MMAP_CHUNK_BYTES; use crate::util::memory; use crate::util::test_util::BYTE_MAP_MMAPPER_TEST_REGION; use crate::util::test_util::{serial_test, with_cleanup}; diff --git a/src/util/heap/layout/fragmented_mapper.rs b/src/util/heap/layout/fragmented_mapper.rs index b3338dc1ac..3630f84cc2 100644 --- 
a/src/util/heap/layout/fragmented_mapper.rs +++ b/src/util/heap/layout/fragmented_mapper.rs @@ -2,7 +2,7 @@ use super::mmapper::MapState; use super::Mmapper; use crate::util::constants::BYTES_IN_PAGE; use crate::util::conversions; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::memory::MmapStrategy; use crate::util::Address; use atomic::{Atomic, Ordering}; @@ -367,7 +367,7 @@ impl Default for FragmentedMapper { mod tests { use super::*; use crate::util::constants::LOG_BYTES_IN_PAGE; - use crate::util::heap::layout::vm_layout_constants::MMAP_CHUNK_BYTES; + use crate::util::heap::layout::vm_layout::MMAP_CHUNK_BYTES; use crate::util::memory; use crate::util::test_util::FRAGMENTED_MMAPPER_TEST_REGION; use crate::util::test_util::{serial_test, with_cleanup}; diff --git a/src/util/heap/layout/heap_parameters.rs b/src/util/heap/layout/heap_parameters.rs index 63dcb75e0c..e2788c6f2b 100644 --- a/src/util/heap/layout/heap_parameters.rs +++ b/src/util/heap/layout/heap_parameters.rs @@ -7,11 +7,3 @@ pub const LOG_MAX_SPACES: usize = 4; * Maximum number of spaces a Plan can support. */ pub const MAX_SPACES: usize = 1 << LOG_MAX_SPACES; - -/** - * In a 64-bit addressing model, each space is the same size, given - * by this constant. At the moment, we require that the number of - * pages in a space fit into a 32-bit signed int, so the maximum - * size of this constant is 41 (assuming 4k pages). - */ -pub const LOG_SPACE_SIZE_64: usize = 41; diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index bb80139a48..d25e0a55f7 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -4,7 +4,7 @@ use crate::util::conversions; use crate::util::freelist::FreeList; use crate::util::heap::freelistpageresource::CommonFreeListPageResource; use crate::util::heap::layout::heap_parameters::*; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::int_array_freelist::IntArrayFreeList; use crate::util::Address; @@ -32,17 +32,18 @@ pub struct Map32 { impl Map32 { pub fn new() -> Self { + let max_chunks = vm_layout().max_chunks(); Map32 { - prev_link: vec![0; MAX_CHUNKS], - next_link: vec![0; MAX_CHUNKS], - region_map: IntArrayFreeList::new(MAX_CHUNKS, MAX_CHUNKS as _, 1), + prev_link: vec![0; max_chunks], + next_link: vec![0; max_chunks], + region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1), global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES), shared_discontig_fl_count: 0, shared_fl_map: vec![None; MAX_SPACES], total_available_discontiguous_chunks: 0, finalized: false, sync: Mutex::new(()), - descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; MAX_CHUNKS], + descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; max_chunks], cumulative_committed_pages: AtomicUsize::new(0), } } @@ -186,7 +187,7 @@ impl VMMap for Map32 { let first_chunk = start_address.chunk_index(); let last_chunk = to.chunk_index(); let unavail_start_chunk = last_chunk + 1; - let trailing_chunks = MAX_CHUNKS - unavail_start_chunk; + let trailing_chunks = vm_layout().max_chunks() - unavail_start_chunk; let pages = (1 + last_chunk - first_chunk) * PAGES_IN_CHUNK; // start_address=0xb0000000, first_chunk=704, last_chunk=703, unavail_start_chunk=704, trailing_chunks=320, pages=0 // startAddress=0x68000000 firstChunk=416 lastChunk=703 unavailStartChunk=704 trailingChunks=320 pages=294912 diff --git 
a/src/util/heap/layout/map64.rs b/src/util/heap/layout/map64.rs index 025f683392..aa04f406cf 100644 --- a/src/util/heap/layout/map64.rs +++ b/src/util/heap/layout/map64.rs @@ -4,7 +4,7 @@ use crate::util::conversions; use crate::util::freelist::FreeList; use crate::util::heap::freelistpageresource::CommonFreeListPageResource; use crate::util::heap::layout::heap_parameters::*; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::memory::MmapStrategy; use crate::util::raw_memory_freelist::RawMemoryFreeList; @@ -35,7 +35,7 @@ impl Map64 { let mut base_address = vec![Address::ZERO; MAX_SPACES]; for i in 0..MAX_SPACES { - let base = unsafe { Address::from_usize(i << LOG_SPACE_SIZE_64) }; + let base = unsafe { Address::from_usize(i << vm_layout().log_space_extent) }; high_water[i] = base; base_address[i] = base; } @@ -46,7 +46,7 @@ // elide the storing of 0 for each of the elements. Using standard vector creation, // such as `vec![SpaceDescriptor::UNINITIALIZED; MAX_CHUNKS]`, will cause severe // slowdown during start-up. - descriptor_map: unsafe { new_zeroed_vec::<SpaceDescriptor>(MAX_CHUNKS) }, + descriptor_map: unsafe { new_zeroed_vec::<SpaceDescriptor>(vm_layout().max_chunks()) }, high_water, base_address, fl_page_resources: vec![None; MAX_SPACES], @@ -60,7 +60,7 @@ impl VMMap for Map64 { fn insert(&self, start: Address, extent: usize, descriptor: SpaceDescriptor) { debug_assert!(Self::is_space_start(start)); - debug_assert!(extent <= SPACE_SIZE_64); + debug_assert!(extent <= vm_layout().space_size_64()); // Each space will call this on exclusive address ranges. It is fine to mutate the descriptor map, // as each space will update different indices.
let self_mut = unsafe { self.mut_self() }; @@ -69,7 +69,7 @@ } fn create_freelist(&self, start: Address) -> Box<dyn FreeList> { - let units = SPACE_SIZE_64 >> LOG_BYTES_IN_PAGE; + let units = vm_layout().space_size_64() >> LOG_BYTES_IN_PAGE; self.create_parent_freelist(start, units, units as _) } @@ -234,14 +234,14 @@ } fn space_index(addr: Address) -> Option<usize> { - if addr > HEAP_END { + if addr > vm_layout().heap_end { return None; } - Some(addr >> SPACE_SHIFT_64) + Some(addr >> vm_layout().space_shift_64()) } fn is_space_start(base: Address) -> bool { - (base & !SPACE_MASK_64) == 0 + (base & !vm_layout().space_mask_64()) == 0 } } diff --git a/src/util/heap/layout/mmapper.rs b/src/util/heap/layout/mmapper.rs index 03c429addf..cee9df26e4 100644 --- a/src/util/heap/layout/mmapper.rs +++ b/src/util/heap/layout/mmapper.rs @@ -1,4 +1,4 @@ -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::memory::*; use crate::util::rust_util::rev_group::RevisitableGroupByForIterator; use crate::util::Address; diff --git a/src/util/heap/layout/mod.rs b/src/util/heap/layout/mod.rs index f8fd768dbe..e303d3f749 100644 --- a/src/util/heap/layout/mod.rs +++ b/src/util/heap/layout/mod.rs @@ -1,5 +1,5 @@ pub mod heap_parameters; -pub mod vm_layout_constants; +pub mod vm_layout; mod mmapper; pub use self::mmapper::Mmapper; @@ -9,6 +9,7 @@ mod fragmented_mapper; mod map; pub use self::map::VMMap; +use self::vm_layout::vm_layout; mod map32; #[cfg(target_pointer_width = "64")] mod map64; @@ -20,8 +21,11 @@ pub fn create_vm_map() -> Box<dyn VMMap> { #[cfg(target_pointer_width = "64")] pub fn create_vm_map() -> Box<dyn VMMap> { - // TODO: Map32 for compressed pointers - Box::new(map64::Map64::new()) + if !vm_layout().force_use_contiguous_spaces { + Box::new(map32::Map32::new()) + } else { + Box::new(map64::Map64::new()) + } } #[cfg(target_pointer_width = "32")] @@ -42,12 +46,12 @@ use std::ops::Range; /// Heap range includes the available range, but may include some address ranges /// that we count as part of the heap but we do not allocate into, such as /// VM spaces. However, currently, heap range is the same as available range. -pub const fn heap_range() -> Range<Address>
{ - vm_layout_constants::HEAP_START..vm_layout_constants::HEAP_END +pub fn heap_range() -> Range<Address>
{ + vm_layout().heap_start..vm_layout().heap_end } /// The available heap range between AVAILABLE_START and AVAILABLE_END. /// Available range is what MMTk may allocate into. -pub const fn available_range() -> Range<Address>
{ - vm_layout_constants::AVAILABLE_START..vm_layout_constants::AVAILABLE_END +pub fn available_range() -> Range<Address>
{ + vm_layout().available_start()..vm_layout().available_end() } diff --git a/src/util/heap/layout/vm_layout.rs b/src/util/heap/layout/vm_layout.rs new file mode 100644 index 0000000000..ddf4472a5a --- /dev/null +++ b/src/util/heap/layout/vm_layout.rs @@ -0,0 +1,197 @@ +use std::sync::atomic::AtomicBool; + +use atomic::Ordering; + +use super::heap_parameters::*; +use crate::util::constants::*; +use crate::util::Address; + +use crate::util::conversions::{chunk_align_down, chunk_align_up}; + +/** + * log_2 of the coarsest unit of address space allocation. + * + * In the 32-bit VM layout, this determines the granularity of + * allocation in a discontiguous space. In the 64-bit layout, + * this determines the growth factor of the large contiguous spaces + * that we provide. + */ +pub const LOG_BYTES_IN_CHUNK: usize = 22; + +/** Coarsest unit of address space allocation. */ +pub const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK; +pub const CHUNK_MASK: usize = (1 << LOG_BYTES_IN_CHUNK) - 1; + +/** Coarsest unit of address space allocation, in pages */ +pub const PAGES_IN_CHUNK: usize = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE as usize); + +/** Granularity at which we map and unmap virtual address space in the heap */ +pub const LOG_MMAP_CHUNK_BYTES: usize = LOG_BYTES_IN_CHUNK; + +pub const MMAP_CHUNK_BYTES: usize = 1 << LOG_MMAP_CHUNK_BYTES; + +/// Runtime-initialized virtual memory constants +#[derive(Clone, Debug)] +pub struct VMLayout { + /// log_2 of the addressable heap virtual space. + pub log_address_space: usize, + /// Lowest virtual address used by the virtual machine. Should be chunk aligned. + pub heap_start: Address, + /// Highest virtual address used by the virtual machine. Should be chunk aligned. + pub heap_end: Address, + /// An upper bound on the extent of any space in the + /// current memory layout + pub log_space_extent: usize, + /// Should MMTk enable contiguous spaces and virtual memory for all spaces? + /// For normal 64-bit config, this should be set to true. Each space should own a contiguous piece of virtual memory. + /// For 32-bit or 64-bit compressed heap, we don't have enough virtual memory, so this should be set to false. + pub force_use_contiguous_spaces: bool, +} + +impl VMLayout { + #[cfg(target_pointer_width = "32")] + pub const LOG_ARCH_ADDRESS_SPACE: usize = 32; + #[cfg(target_pointer_width = "64")] + pub const LOG_ARCH_ADDRESS_SPACE: usize = 47; + /// An upper bound on the extent of any space in the + /// current memory layout + pub const fn max_space_extent(&self) -> usize { + 1 << self.log_space_extent + } + /// Lowest virtual address available for MMTk to manage. + pub const fn available_start(&self) -> Address { + self.heap_start + } + /// Highest virtual address available for MMTk to manage. + pub const fn available_end(&self) -> Address { + self.heap_end + } + /// Size of the address space available to the MMTk heap. + pub const fn available_bytes(&self) -> usize { + self.available_end().get_extent(self.available_start()) + } + /// Maximum number of chunks we need to track. Only used in 32-bit layout. + pub const fn max_chunks(&self) -> usize { + 1 << self.log_max_chunks() + } + /// log_2 of the maximum number of chunks we need to track. Only used in 32-bit layout. + pub const fn log_max_chunks(&self) -> usize { + Self::LOG_ARCH_ADDRESS_SPACE - LOG_BYTES_IN_CHUNK + } + /// Number of bits to shift a space index into/out of a virtual address. + /// In a 32-bit model, use a dummy value so that the compiler doesn't barf.
+ pub(crate) fn space_shift_64(&self) -> usize { + self.log_space_extent + } + /// Bitwise mask to isolate a space index in a virtual address. + /// We can't express this constant in a 32-bit environment, hence the + /// conditional definition. + pub(crate) fn space_mask_64(&self) -> usize { + ((1 << LOG_MAX_SPACES) - 1) << self.space_shift_64() + } + /// Size of each space in the 64-bit memory layout + /// We can't express this constant in a 32-bit environment, hence the + /// conditional definition. + /// FIXME: When compiling for 32 bits this expression makes no sense + pub(crate) fn space_size_64(&self) -> usize { + self.max_space_extent() + } + /// log_2 of the number of pages in a 64-bit space + pub(crate) fn log_pages_in_space64(&self) -> usize { + self.log_space_extent - LOG_BYTES_IN_PAGE as usize + } + /// The number of pages in a 64-bit space + pub(crate) fn pages_in_space64(&self) -> usize { + 1 << self.log_pages_in_space64() + } + + /// This mask extracts a few bits from an address, and uses it as an index into the space map table. + /// When masked with this constant, the index is 1 to 16. If we mask any arbitrary address with this mask, we will get 0 to 31 (32 entries). + pub(crate) fn address_mask(&self) -> usize { + 0x1f << self.log_space_extent + } + + const fn validate(&self) { + assert!(self.heap_start.is_aligned_to(BYTES_IN_CHUNK)); + assert!(self.heap_end.is_aligned_to(BYTES_IN_CHUNK)); + assert!(self.heap_start.as_usize() < self.heap_end.as_usize()); + assert!(self.log_address_space <= Self::LOG_ARCH_ADDRESS_SPACE); + assert!(self.log_space_extent <= self.log_address_space); + if self.force_use_contiguous_spaces { + assert!(self.log_space_extent <= (self.log_address_space - LOG_MAX_SPACES)); + assert!(self.heap_start.is_aligned_to(self.max_space_extent())); + } + } +} + +impl VMLayout { + /// Normal 32-bit configuration + pub const fn new_32bit() -> Self { + let layout32 = Self { + log_address_space: 32, + heap_start: chunk_align_down(unsafe { Address::from_usize(0x8000_0000) }), + heap_end: chunk_align_up(unsafe { Address::from_usize(0xd000_0000) }), + log_space_extent: 31, + force_use_contiguous_spaces: false, + }; + layout32.validate(); + layout32 + } + /// Normal 64-bit configuration + #[cfg(target_pointer_width = "64")] + pub const fn new_64bit() -> Self { + let layout64 = Self { + log_address_space: 47, + heap_start: chunk_align_down(unsafe { + Address::from_usize(0x0000_0200_0000_0000usize) + }), + heap_end: chunk_align_up(unsafe { Address::from_usize(0x0000_2200_0000_0000usize) }), + log_space_extent: 41, + force_use_contiguous_spaces: true, + }; + layout64.validate(); + layout64 + } + + /// Custom VM layout constants. VM bindings may use this function for compressed or 39-bit heap support. + /// This function must be called before MMTk::new() + pub(crate) fn set_custom_vm_layout(constants: VMLayout) { + if cfg!(debug_assertions) { + assert!( + !VM_LAYOUT_FETCHED.load(Ordering::SeqCst), + "vm_layout has already been used before setup" + ); + } + constants.validate(); + unsafe { + VM_LAYOUT = constants; + } + } +} + +// Implement default so bindings can selectively change some parameters while using defaults for others.
+impl std::default::Default for VMLayout { + #[cfg(target_pointer_width = "32")] + fn default() -> Self { + Self::new_32bit() + } + + #[cfg(target_pointer_width = "64")] + fn default() -> Self { + Self::new_64bit() + } +} + +#[cfg(target_pointer_width = "32")] +static mut VM_LAYOUT: VMLayout = VMLayout::new_32bit(); +#[cfg(target_pointer_width = "64")] +static mut VM_LAYOUT: VMLayout = VMLayout::new_64bit(); + +static VM_LAYOUT_FETCHED: AtomicBool = AtomicBool::new(false); + +pub fn vm_layout() -> &'static VMLayout { + if cfg!(debug_assertions) { + VM_LAYOUT_FETCHED.store(true, Ordering::SeqCst); + } + unsafe { &VM_LAYOUT } +} diff --git a/src/util/heap/layout/vm_layout_constants.rs b/src/util/heap/layout/vm_layout_constants.rs deleted file mode 100644 index 8b411b5f04..0000000000 --- a/src/util/heap/layout/vm_layout_constants.rs +++ /dev/null @@ -1,167 +0,0 @@ -use super::heap_parameters::*; -use crate::util::constants::*; -use crate::util::Address; - -use crate::util::conversions::{chunk_align_down, chunk_align_up}; - -/// log_2 of the addressable virtual space. -#[cfg(target_pointer_width = "64")] -// This used to be LOG_SPACE_SIZE_64 + LOG_MAX_SPACES (45). -// We increase this as we also use malloc which may give us addresses that is beyond 1 << 45. -// This affects how much address space we need to reserve for side metadata. -pub const LOG_ADDRESS_SPACE: usize = 47; -#[cfg(target_pointer_width = "32")] -pub const LOG_ADDRESS_SPACE: usize = 32; -/** - * log_2 of the coarsest unit of address space allocation. - * - * In the 32-bit VM layout, this determines the granularity of - * allocation in a discontigouous space. In the 64-bit layout, - * this determines the growth factor of the large contiguous spaces - * that we provide. - */ -pub const LOG_BYTES_IN_CHUNK: usize = 22; - -/** Coarsest unit of address space allocation. */ -pub const BYTES_IN_CHUNK: usize = 1 << LOG_BYTES_IN_CHUNK; -pub const CHUNK_MASK: usize = (1 << LOG_BYTES_IN_CHUNK) - 1; - -/** Coarsest unit of address space allocation, in pages */ -pub const PAGES_IN_CHUNK: usize = 1 << (LOG_BYTES_IN_CHUNK - LOG_BYTES_IN_PAGE as usize); - -/** log_2 of the maximum number of chunks we need to track. Only used in 32-bit layout.*/ -pub const LOG_MAX_CHUNKS: usize = LOG_ADDRESS_SPACE - LOG_BYTES_IN_CHUNK; - -/** Maximum number of chunks we need to track. Only used in 32-bit layout. 
*/ -pub const MAX_CHUNKS: usize = 1 << LOG_MAX_CHUNKS; - -/** - * An upper bound on the extent of any space in the - * current memory layout - */ -#[cfg(target_pointer_width = "64")] -pub const LOG_SPACE_EXTENT: usize = LOG_SPACE_SIZE_64; -#[cfg(target_pointer_width = "32")] -pub const LOG_SPACE_EXTENT: usize = 31; - -/** - * An upper bound on the extent of any space in the - * current memory layout - */ -pub const MAX_SPACE_EXTENT: usize = 1 << LOG_SPACE_EXTENT; - -// FIXME: HEAP_START, HEAP_END are VM-dependent -/** Lowest virtual address used by the virtual machine */ -#[cfg(target_pointer_width = "32")] -pub const HEAP_START: Address = chunk_align_down(unsafe { Address::from_usize(0x8000_0000) }); -#[cfg(target_pointer_width = "64")] -pub const HEAP_START: Address = - chunk_align_down(unsafe { Address::from_usize(0x0000_0200_0000_0000usize) }); - -/** Highest virtual address used by the virtual machine */ -#[cfg(target_pointer_width = "32")] -pub const HEAP_END: Address = chunk_align_up(unsafe { Address::from_usize(0xd000_0000) }); -#[cfg(target_pointer_width = "64")] -pub const HEAP_END: Address = HEAP_START.add(1 << (LOG_MAX_SPACES + LOG_SPACE_EXTENT)); - -#[cfg(test)] -mod test_heap_range { - #[cfg(target_pointer_width = "64")] - #[test] - fn test_heap_end() { - use super::*; - // Just to ensure we know if the heap end is changed - assert_eq!( - HEAP_END, - chunk_align_up(unsafe { Address::from_usize(0x0000_2200_0000_0000usize) }) - ) - } -} - -/// vm-sapce size (currently only used by jikesrvm) -#[cfg(target_pointer_width = "32")] -pub const VM_SPACE_SIZE: usize = - chunk_align_up(unsafe { Address::from_usize(0x800_0000) }).as_usize(); -#[cfg(target_pointer_width = "64")] -pub const VM_SPACE_SIZE: usize = - chunk_align_up(unsafe { Address::from_usize(0xdc0_0000) }).as_usize(); - -// In Java MMTk, the virtual memory between HEAP_START and AVIALBE_START, and between AVAILABLE_END -// and HEAP_END, are VM spaces. -// For us, the address range for VM spaces is set by the runtime, and we do not know them -// as constants. At this point, our AVAILALBE_START is the same as HEAP_START, and our AVIALABLE_END -// is the same as HEAP_END. -// TOOD: We should decide if VM space is considered as part of our heap range, and remove either AVAILABLE_START/END, or HEAP_START/END. -// We can do either: -// 1. Our heap is what we use for MMTk. So VM spaces are not in our heap. Or -// 2. Our heap includes VM spaces, so its address range depends on the VM space range. - -/** - * Lowest virtual address available for MMTk to manage. - */ -pub const AVAILABLE_START: Address = HEAP_START; - -/** - * Highest virtual address available for MMTk to manage. -*/ -pub const AVAILABLE_END: Address = HEAP_END; - -/** Size of the address space available to the MMTk heap. */ -pub const AVAILABLE_BYTES: usize = AVAILABLE_END.get_extent(AVAILABLE_START); - -/** Granularity at which we map and unmap virtual address space in the heap */ -pub const LOG_MMAP_CHUNK_BYTES: usize = LOG_BYTES_IN_CHUNK; - -pub const MMAP_CHUNK_BYTES: usize = 1 << LOG_MMAP_CHUNK_BYTES; - -/** log_2 of the number of pages in a 64-bit space */ -pub const LOG_PAGES_IN_SPACE64: usize = LOG_SPACE_SIZE_64 - LOG_BYTES_IN_PAGE as usize; - -/** The number of pages in a 64-bit space */ -pub const PAGES_IN_SPACE64: usize = 1 << LOG_PAGES_IN_SPACE64; - -/* - * The 64-bit VM layout divides address space into LOG_MAX_SPACES (k) fixed size - * regions of size 2^n, aligned at 2^n byte boundaries. 
A virtual address can be - * subdivided into fields as follows - * - * 64 0 - * 00...0SSSSSaaaaaaaaaaa...aaaaaaaa - * - * The field 'S' identifies the space to which the address points. - */ - -/** - * Number of bits to shift a space index into/out of a virtual address. - */ -/* In a 32-bit model, use a dummy value so that the compiler doesn't barf. */ -#[cfg(target_pointer_width = "32")] -pub const SPACE_SHIFT_64: usize = 0; -#[cfg(target_pointer_width = "64")] -pub const SPACE_SHIFT_64: usize = LOG_SPACE_SIZE_64; - -/** - * Bitwise mask to isolate a space index in a virtual address. - * - * We can't express this constant in a 32-bit environment, hence the - * conditional definition. - */ -#[cfg(target_pointer_width = "32")] -pub const SPACE_MASK_64: usize = 0; -#[cfg(target_pointer_width = "64")] -pub const SPACE_MASK_64: usize = ((1 << LOG_MAX_SPACES) - 1) << SPACE_SHIFT_64; - -/* - * Size of each space in the 64-bit memory layout - * - * We can't express this constant in a 32-bit environment, hence the - * conditional definition. - */ -// FIXME: When Compiling for 32 bits this expression makes no sense -// #[allow(const_err)] -// pub const SPACE_SIZE_64: usize = if_then_else_usize!(HEAP_LAYOUT_64BIT, -// 1 << LOG_SPACE_SIZE_64, MAX_SPACE_EXTENT); -#[cfg(target_pointer_width = "64")] -pub const SPACE_SIZE_64: usize = 1 << LOG_SPACE_SIZE_64; -#[cfg(target_pointer_width = "32")] -pub const SPACE_SIZE_64: usize = MAX_SPACE_EXTENT; diff --git a/src/util/heap/mod.rs b/src/util/heap/mod.rs index 903d0c8476..a41a5a53e3 100644 --- a/src/util/heap/mod.rs +++ b/src/util/heap/mod.rs @@ -1,20 +1,21 @@ mod accounting; #[macro_use] -pub mod layout; -pub mod blockpageresource; -pub mod chunk_map; -pub mod freelistpageresource; -pub mod gc_trigger; +pub(crate) mod layout; +pub(crate) mod blockpageresource; +pub(crate) mod chunk_map; +pub(crate) mod freelistpageresource; +pub(crate) mod gc_trigger; mod heap_meta; -pub mod monotonepageresource; -pub mod pageresource; -pub mod space_descriptor; +pub(crate) mod monotonepageresource; +pub(crate) mod pageresource; +pub(crate) mod space_descriptor; mod vmrequest; -pub use self::accounting::PageAccounting; -pub use self::blockpageresource::BlockPageResource; -pub use self::freelistpageresource::FreeListPageResource; -pub use self::heap_meta::HeapMeta; -pub use self::monotonepageresource::MonotonePageResource; -pub use self::pageresource::PageResource; -pub use self::vmrequest::VMRequest; +pub(crate) use self::accounting::PageAccounting; +pub(crate) use self::blockpageresource::BlockPageResource; +pub(crate) use self::freelistpageresource::FreeListPageResource; +pub(crate) use self::heap_meta::HeapMeta; +pub use self::layout::vm_layout; +pub(crate) use self::monotonepageresource::MonotonePageResource; +pub(crate) use self::pageresource::PageResource; +pub(crate) use self::vmrequest::VMRequest; diff --git a/src/util/heap/monotonepageresource.rs b/src/util/heap/monotonepageresource.rs index 297c9e60f4..093b0198dc 100644 --- a/src/util/heap/monotonepageresource.rs +++ b/src/util/heap/monotonepageresource.rs @@ -1,11 +1,11 @@ -use super::layout::vm_layout_constants::{BYTES_IN_CHUNK, PAGES_IN_CHUNK}; +use super::layout::vm_layout::{BYTES_IN_CHUNK, PAGES_IN_CHUNK}; use crate::policy::space::required_chunks; use crate::util::address::Address; use crate::util::conversions::*; use std::sync::{Mutex, MutexGuard}; use crate::util::alloc::embedded_meta_data::*; -use crate::util::heap::layout::vm_layout_constants::LOG_BYTES_IN_CHUNK; +use 
crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK; use crate::util::heap::pageresource::CommonPageResource; use crate::util::opaque_pointer::*; diff --git a/src/util/heap/space_descriptor.rs b/src/util/heap/space_descriptor.rs index a52e5161b4..4303eb9041 100644 --- a/src/util/heap/space_descriptor.rs +++ b/src/util/heap/space_descriptor.rs @@ -1,5 +1,5 @@ use crate::util::constants::*; -use crate::util::heap::layout::vm_layout_constants; +use crate::util::heap::layout::vm_layout::{self, vm_layout}; use crate::util::Address; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -11,18 +11,14 @@ const TYPE_CONTIGUOUS_HI: usize = 3; const TYPE_MASK: usize = (1 << TYPE_BITS) - 1; const SIZE_SHIFT: usize = TYPE_BITS; const SIZE_BITS: usize = 10; -#[cfg(target_pointer_width = "32")] const SIZE_MASK: usize = ((1 << SIZE_BITS) - 1) << SIZE_SHIFT; const EXPONENT_SHIFT: usize = SIZE_SHIFT + SIZE_BITS; const EXPONENT_BITS: usize = 5; -#[cfg(target_pointer_width = "32")] const EXPONENT_MASK: usize = ((1 << EXPONENT_BITS) - 1) << EXPONENT_SHIFT; const MANTISSA_SHIFT: usize = EXPONENT_SHIFT + EXPONENT_BITS; const MANTISSA_BITS: usize = 14; const BASE_EXPONENT: usize = BITS_IN_INT - MANTISSA_BITS; -// get_index() is only implemented for 64 bits -#[cfg(target_pointer_width = "64")] const INDEX_MASK: usize = !TYPE_MASK; const INDEX_SHIFT: usize = TYPE_BITS; @@ -36,12 +32,12 @@ impl SpaceDescriptor { pub const UNINITIALIZED: Self = SpaceDescriptor(0); pub fn create_descriptor_from_heap_range(start: Address, end: Address) -> SpaceDescriptor { - let top = end == vm_layout_constants::HEAP_END; - if cfg!(target_pointer_width = "64") { - let space_index = if start > vm_layout_constants::HEAP_END { + let top = end == vm_layout().heap_end; + if vm_layout().force_use_contiguous_spaces { + let space_index = if start > vm_layout().heap_end { ::std::usize::MAX } else { - start >> vm_layout_constants::SPACE_SHIFT_64 + start >> vm_layout().space_shift_64() }; return SpaceDescriptor( space_index << INDEX_SHIFT @@ -52,7 +48,7 @@ impl SpaceDescriptor { }), ); } - let chunks = (end - start) >> vm_layout_constants::LOG_BYTES_IN_CHUNK; + let chunks = (end - start) >> vm_layout::LOG_BYTES_IN_CHUNK; debug_assert!(!start.is_zero() && chunks > 0 && chunks < (1 << SIZE_BITS)); let mut tmp = start >> BASE_EXPONENT; let mut exponent = 0; @@ -94,14 +90,16 @@ impl SpaceDescriptor { (self.0 & TYPE_MASK) == TYPE_CONTIGUOUS_HI } - #[cfg(target_pointer_width = "64")] pub fn get_start(self) -> Address { - use crate::util::heap::layout::heap_parameters; - unsafe { Address::from_usize(self.get_index() << heap_parameters::LOG_SPACE_SIZE_64) } + if !vm_layout().force_use_contiguous_spaces { + // For 64-bit discontiguous space, use 32-bit start address + self.get_start_32() + } else { + unsafe { Address::from_usize(self.get_index() << vm_layout().log_space_extent) } + } } - #[cfg(target_pointer_width = "32")] - pub fn get_start(self) -> Address { + fn get_start_32(self) -> Address { debug_assert!(self.is_contiguous()); let descriptor = self.0; @@ -112,17 +110,25 @@ impl SpaceDescriptor { #[cfg(target_pointer_width = "64")] pub fn get_extent(self) -> usize { - vm_layout_constants::SPACE_SIZE_64 + if !vm_layout().force_use_contiguous_spaces { + // For 64-bit discontiguous space, use 32-bit extent + self.get_extent_32() + } else { + vm_layout().space_size_64() + } } #[cfg(target_pointer_width = "32")] pub fn get_extent(self) -> usize { + self.get_extent_32() + } + + fn get_extent_32(self) -> usize { debug_assert!(self.is_contiguous()); let 
chunks = (self.0 & SIZE_MASK) >> SIZE_SHIFT; - chunks << vm_layout_constants::LOG_BYTES_IN_CHUNK + chunks << vm_layout::LOG_BYTES_IN_CHUNK } - #[cfg(target_pointer_width = "64")] pub fn get_index(self) -> usize { (self.0 & INDEX_MASK) >> INDEX_SHIFT } @@ -131,7 +137,7 @@ impl SpaceDescriptor { #[cfg(test)] mod tests { use super::*; - use crate::util::heap::layout::vm_layout_constants::*; + use crate::util::heap::layout::vm_layout::*; #[test] fn create_discontiguous_descriptor() { @@ -151,15 +157,15 @@ mod tests { #[test] fn create_contiguous_descriptor_at_heap_start() { let d = SpaceDescriptor::create_descriptor_from_heap_range( - HEAP_START, - HEAP_START + TEST_SPACE_SIZE, + vm_layout().heap_start, + vm_layout().heap_start + TEST_SPACE_SIZE, ); assert!(!d.is_empty()); assert!(d.is_contiguous()); assert!(!d.is_contiguous_hi()); - assert_eq!(d.get_start(), HEAP_START); + assert_eq!(d.get_start(), vm_layout().heap_start); if cfg!(target_pointer_width = "64") { - assert_eq!(d.get_extent(), SPACE_SIZE_64); + assert_eq!(d.get_extent(), vm_layout().space_size_64()); } else { assert_eq!(d.get_extent(), TEST_SPACE_SIZE); } @@ -168,17 +174,17 @@ mod tests { #[test] fn create_contiguous_descriptor_in_heap() { let d = SpaceDescriptor::create_descriptor_from_heap_range( - HEAP_START + TEST_SPACE_SIZE, - HEAP_START + TEST_SPACE_SIZE * 2, + vm_layout().heap_start + TEST_SPACE_SIZE, + vm_layout().heap_start + TEST_SPACE_SIZE * 2, ); assert!(!d.is_empty()); assert!(d.is_contiguous()); assert!(!d.is_contiguous_hi()); if cfg!(target_pointer_width = "64") { - assert_eq!(d.get_start(), HEAP_START); - assert_eq!(d.get_extent(), SPACE_SIZE_64); + assert_eq!(d.get_start(), vm_layout().heap_start); + assert_eq!(d.get_extent(), vm_layout().space_size_64()); } else { - assert_eq!(d.get_start(), HEAP_START + TEST_SPACE_SIZE); + assert_eq!(d.get_start(), vm_layout().heap_start + TEST_SPACE_SIZE); assert_eq!(d.get_extent(), TEST_SPACE_SIZE); } } @@ -186,17 +192,20 @@ mod tests { #[test] fn create_contiguous_descriptor_at_heap_end() { let d = SpaceDescriptor::create_descriptor_from_heap_range( - HEAP_END - TEST_SPACE_SIZE, - HEAP_END, + vm_layout().heap_end - TEST_SPACE_SIZE, + vm_layout().heap_end, ); assert!(!d.is_empty()); assert!(d.is_contiguous()); assert!(d.is_contiguous_hi()); if cfg!(target_pointer_width = "64") { - assert_eq!(d.get_start(), HEAP_END - SPACE_SIZE_64); - assert_eq!(d.get_extent(), SPACE_SIZE_64); + assert_eq!( + d.get_start(), + vm_layout().heap_end - vm_layout().space_size_64() + ); + assert_eq!(d.get_extent(), vm_layout().space_size_64()); } else { - assert_eq!(d.get_start(), HEAP_END - TEST_SPACE_SIZE); + assert_eq!(d.get_start(), vm_layout().heap_end - TEST_SPACE_SIZE); assert_eq!(d.get_extent(), TEST_SPACE_SIZE); } } diff --git a/src/util/heap/vmrequest.rs b/src/util/heap/vmrequest.rs index e8adbd69d6..7a6f8148f8 100644 --- a/src/util/heap/vmrequest.rs +++ b/src/util/heap/vmrequest.rs @@ -1,4 +1,4 @@ -use super::layout::vm_layout_constants::*; +use super::layout::vm_layout::*; use crate::util::constants::*; use crate::util::Address; @@ -17,20 +17,20 @@ impl VMRequest { pub fn common64bit(top: bool) -> Self { VMRequest::Extent { - extent: MAX_SPACE_EXTENT, + extent: vm_layout().max_space_extent(), top, } } pub fn discontiguous() -> Self { - if cfg!(target_pointer_width = "64") { + if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { return Self::common64bit(false); } VMRequest::Discontiguous } pub fn fixed_size(mb: usize) -> Self { - if cfg!(target_pointer_width 
= "64") { + if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { return Self::common64bit(false); } VMRequest::Extent { @@ -40,14 +40,14 @@ impl VMRequest { } pub fn fraction(frac: f32) -> Self { - if cfg!(target_pointer_width = "64") { + if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { return Self::common64bit(false); } VMRequest::Fraction { frac, top: false } } pub fn high_fixed_size(mb: usize) -> Self { - if cfg!(target_pointer_width = "64") { + if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { return Self::common64bit(true); } VMRequest::Extent { @@ -57,7 +57,7 @@ impl VMRequest { } pub fn fixed_extent(extent: usize, top: bool) -> Self { - if cfg!(target_pointer_width = "64") { + if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { return Self::common64bit(top); } VMRequest::Extent { extent, top } diff --git a/src/util/metadata/side_metadata/constants.rs b/src/util/metadata/side_metadata/constants.rs index 5b39598b6d..eefd360078 100644 --- a/src/util/metadata/side_metadata/constants.rs +++ b/src/util/metadata/side_metadata/constants.rs @@ -1,6 +1,6 @@ +use crate::util::heap::layout::vm_layout::VMLayout; #[cfg(target_pointer_width = "32")] -use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; -use crate::util::heap::layout::vm_layout_constants::LOG_ADDRESS_SPACE; +use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::metadata::side_metadata::SideMetadataOffset; use crate::util::Address; @@ -50,7 +50,7 @@ pub(super) const LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO: usize = 3; pub(super) const LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO: usize = 1; pub const LOG_MAX_GLOBAL_SIDE_METADATA_SIZE: usize = - LOG_ADDRESS_SPACE - LOG_GLOBAL_SIDE_METADATA_WORST_CASE_RATIO; + VMLayout::LOG_ARCH_ADDRESS_SPACE - LOG_GLOBAL_SIDE_METADATA_WORST_CASE_RATIO; // TODO - we should check this limit somewhere // pub(crate) const LOG_MAX_LOCAL_SIDE_METADATA_SIZE: usize = // 1 << (LOG_ADDRESS_SPACE - LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO); diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 74b6c0e481..64144e3e84 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -1,7 +1,7 @@ use super::*; use crate::util::constants::{BYTES_IN_PAGE, BYTES_IN_WORD, LOG_BITS_IN_BYTE}; use crate::util::conversions::raw_align_up; -use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK; +use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::memory; use crate::util::metadata::metadata_val_traits::*; #[cfg(feature = "vo_bit")] @@ -1292,7 +1292,7 @@ mod tests { assert_eq!(side_metadata.calculate_reserved_pages(1024), 16 + 1); } - use crate::util::heap::layout::vm_layout_constants; + use crate::util::heap::layout::vm_layout; use crate::util::test_util::{serial_test, with_cleanup}; use paste::paste; @@ -1315,7 +1315,7 @@ mod tests { let mut sanity = SideMetadataSanity::new(); sanity.verify_metadata_context("TestPolicy", &context); - let data_addr = vm_layout_constants::HEAP_START; + let data_addr = vm_layout::vm_layout().heap_start; let meta_addr = address_to_meta_address(&spec, data_addr); with_cleanup( || { diff --git a/src/util/metadata/side_metadata/helpers.rs b/src/util/metadata/side_metadata/helpers.rs index 8e059d3932..90dd496698 100644 --- a/src/util/metadata/side_metadata/helpers.rs +++ b/src/util/metadata/side_metadata/helpers.rs @@ 
-1,12 +1,10 @@ use super::SideMetadataSpec; use crate::util::constants::LOG_BYTES_IN_PAGE; +use crate::util::constants::{BITS_IN_WORD, BYTES_IN_PAGE, LOG_BITS_IN_BYTE}; +use crate::util::heap::layout::vm_layout::VMLayout; #[cfg(target_pointer_width = "32")] use crate::util::metadata::side_metadata::address_to_chunked_meta_address; use crate::util::Address; -use crate::util::{ - constants::{BITS_IN_WORD, BYTES_IN_PAGE, LOG_BITS_IN_BYTE}, - heap::layout::vm_layout_constants::LOG_ADDRESS_SPACE, -}; use crate::MMAPPER; use std::io::Result; @@ -119,7 +117,7 @@ pub(crate) const fn addr_rshift(metadata_spec: &SideMetadataSpec) -> i32 { #[allow(dead_code)] pub const fn metadata_address_range_size(metadata_spec: &SideMetadataSpec) -> usize { - 1usize << (LOG_ADDRESS_SPACE - addr_rshift(metadata_spec) as usize) + 1usize << (VMLayout::LOG_ARCH_ADDRESS_SPACE - addr_rshift(metadata_spec) as usize) } pub(crate) fn meta_byte_lshift(metadata_spec: &SideMetadataSpec, data_addr: Address) -> u8 { diff --git a/src/util/metadata/side_metadata/helpers_32.rs b/src/util/metadata/side_metadata/helpers_32.rs index 58294c9e6b..649ba01027 100644 --- a/src/util/metadata/side_metadata/helpers_32.rs +++ b/src/util/metadata/side_metadata/helpers_32.rs @@ -1,7 +1,7 @@ use super::SideMetadataSpec; use crate::util::{ constants::{self, LOG_BITS_IN_BYTE}, - heap::layout::vm_layout_constants::{BYTES_IN_CHUNK, CHUNK_MASK, LOG_BYTES_IN_CHUNK}, + heap::layout::vm_layout::{BYTES_IN_CHUNK, CHUNK_MASK, LOG_BYTES_IN_CHUNK}, memory, Address, }; use std::io::Result; diff --git a/src/util/metadata/side_metadata/sanity.rs b/src/util/metadata/side_metadata/sanity.rs index e2b92f696a..ae1377a10f 100644 --- a/src/util/metadata/side_metadata/sanity.rs +++ b/src/util/metadata/side_metadata/sanity.rs @@ -7,9 +7,11 @@ use super::constants::{ LOG_GLOBAL_SIDE_METADATA_WORST_CASE_RATIO, LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO, }; use super::{SideMetadataContext, SideMetadataSpec}; -use crate::util::heap::layout::vm_layout_constants::LOG_ADDRESS_SPACE; +#[cfg(target_pointer_width = "64")] +use crate::util::heap::layout::vm_layout::vm_layout; +use crate::util::heap::layout::vm_layout::VMLayout; #[cfg(target_pointer_width = "32")] -use crate::util::heap::layout::vm_layout_constants::LOG_BYTES_IN_CHUNK; +use crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK; /// An internal enum to enhance code style for add/sub #[cfg(feature = "extreme_assertions")] @@ -63,7 +65,9 @@ fn verify_global_specs_total_size(g_specs: &[SideMetadataSpec]) -> Result<()> { total_size += super::metadata_address_range_size(spec); } - if total_size <= 1usize << (LOG_ADDRESS_SPACE - LOG_GLOBAL_SIDE_METADATA_WORST_CASE_RATIO) { + if total_size + <= 1usize << (VMLayout::LOG_ARCH_ADDRESS_SPACE - LOG_GLOBAL_SIDE_METADATA_WORST_CASE_RATIO) + { Ok(()) } else { Err(Error::new( @@ -84,7 +88,8 @@ fn verify_global_specs_total_size(g_specs: &[SideMetadataSpec]) -> Result<()> { fn verify_local_specs_size(l_specs: &[SideMetadataSpec]) -> Result<()> { for spec in l_specs { if super::metadata_address_range_size(spec) - > 1usize << (LOG_ADDRESS_SPACE - LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO) + > 1usize + << (VMLayout::LOG_ARCH_ADDRESS_SPACE - LOG_LOCAL_SIDE_METADATA_WORST_CASE_RATIO) { return Err(Error::new( ErrorKind::InvalidInput, @@ -367,12 +372,12 @@ impl SideMetadataSanity { /// 2. Check if metadata address is out of bounds. If this fails, we will panic. 
fn verify_metadata_address_bound(spec: &SideMetadataSpec, data_addr: Address) { #[cfg(target_pointer_width = "32")] - assert_eq!(LOG_ADDRESS_SPACE, 32, "We assume we use all address space in 32 bits. This seems not true any more, we need a proper check here."); + assert_eq!(VMLayout::LOG_ARCH_ADDRESS_SPACE, 32, "We assume we use all address space in 32 bits. This seems not true any more, we need a proper check here."); #[cfg(target_pointer_width = "32")] let data_addr_in_address_space = true; #[cfg(target_pointer_width = "64")] let data_addr_in_address_space = - data_addr <= unsafe { Address::from_usize(1usize << LOG_ADDRESS_SPACE) }; + data_addr <= unsafe { Address::from_usize(1usize << vm_layout().log_address_space) }; if !data_addr_in_address_space { warn!( diff --git a/src/util/metadata/side_metadata/side_metadata_tests.rs b/src/util/metadata/side_metadata/side_metadata_tests.rs index 4a07f188e3..69eeaabd5a 100644 --- a/src/util/metadata/side_metadata/side_metadata_tests.rs +++ b/src/util/metadata/side_metadata/side_metadata_tests.rs @@ -3,7 +3,8 @@ mod tests { use atomic::Ordering; use crate::util::constants; - use crate::util::heap::layout::vm_layout_constants; + use crate::util::heap::layout::vm_layout; + use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::metadata::side_metadata::*; @@ -190,6 +191,7 @@ mod tests { #[test] fn test_side_metadata_try_mmap_metadata() { + let heap_start = vm_layout().heap_start; serial_test(|| { with_cleanup( || { @@ -228,25 +230,15 @@ mod tests { metadata_sanity.verify_metadata_context("NoPolicy", &metadata); assert!(metadata - .try_map_metadata_space( - vm_layout_constants::HEAP_START, - constants::BYTES_IN_PAGE, - ) + .try_map_metadata_space(heap_start, constants::BYTES_IN_PAGE,) .is_ok()); - gspec.assert_metadata_mapped(vm_layout_constants::HEAP_START); - lspec.assert_metadata_mapped(vm_layout_constants::HEAP_START); - gspec.assert_metadata_mapped( - vm_layout_constants::HEAP_START + constants::BYTES_IN_PAGE - 1, - ); - lspec.assert_metadata_mapped( - vm_layout_constants::HEAP_START + constants::BYTES_IN_PAGE - 1, - ); + gspec.assert_metadata_mapped(heap_start); + lspec.assert_metadata_mapped(heap_start); + gspec.assert_metadata_mapped(heap_start + constants::BYTES_IN_PAGE - 1); + lspec.assert_metadata_mapped(heap_start + constants::BYTES_IN_PAGE - 1); - metadata.ensure_unmap_metadata_space( - vm_layout_constants::HEAP_START, - constants::BYTES_IN_PAGE, - ); + metadata.ensure_unmap_metadata_space(heap_start, constants::BYTES_IN_PAGE); gspec.log_bytes_in_region = 4; gspec.log_num_of_bits = 4; @@ -265,29 +257,19 @@ mod tests { assert!(metadata .try_map_metadata_space( - vm_layout_constants::HEAP_START + vm_layout_constants::BYTES_IN_CHUNK, - vm_layout_constants::BYTES_IN_CHUNK, + heap_start + vm_layout::BYTES_IN_CHUNK, + vm_layout::BYTES_IN_CHUNK, ) .is_ok()); - gspec.assert_metadata_mapped( - vm_layout_constants::HEAP_START + vm_layout_constants::BYTES_IN_CHUNK, - ); - lspec.assert_metadata_mapped( - vm_layout_constants::HEAP_START + vm_layout_constants::BYTES_IN_CHUNK, - ); - gspec.assert_metadata_mapped( - vm_layout_constants::HEAP_START + vm_layout_constants::BYTES_IN_CHUNK * 2 - - 8, - ); - lspec.assert_metadata_mapped( - vm_layout_constants::HEAP_START + vm_layout_constants::BYTES_IN_CHUNK * 2 - - 16, - ); + gspec.assert_metadata_mapped(heap_start + vm_layout::BYTES_IN_CHUNK); + 
lspec.assert_metadata_mapped(heap_start + vm_layout::BYTES_IN_CHUNK); + gspec.assert_metadata_mapped(heap_start + vm_layout::BYTES_IN_CHUNK * 2 - 8); + lspec.assert_metadata_mapped(heap_start + vm_layout::BYTES_IN_CHUNK * 2 - 16); metadata.ensure_unmap_metadata_space( - vm_layout_constants::HEAP_START + vm_layout_constants::BYTES_IN_CHUNK, - vm_layout_constants::BYTES_IN_CHUNK, + heap_start + vm_layout::BYTES_IN_CHUNK, + vm_layout::BYTES_IN_CHUNK, ); }, || { @@ -304,7 +286,7 @@ mod tests { || { // We need to do this because of the static NO_METADATA // sanity::reset(); - let data_addr = vm_layout_constants::HEAP_START; + let data_addr = vm_layout().heap_start; let metadata_1_spec = SideMetadataSpec { name: "metadata_1_spec", @@ -379,8 +361,7 @@ mod tests { || { // We need to do this because of the static NO_METADATA // sanity::reset(); - let data_addr = vm_layout_constants::HEAP_START - + (vm_layout_constants::BYTES_IN_CHUNK << 1) * 2; + let data_addr = vm_layout().heap_start + (vm_layout::BYTES_IN_CHUNK << 1) * 2; let metadata_1_spec = SideMetadataSpec { name: "metadata_1_spec", @@ -436,8 +417,8 @@ mod tests { || { // We need to do this because of the static NO_METADATA // sanity::reset(); - let data_addr = vm_layout_constants::HEAP_START - + (vm_layout_constants::BYTES_IN_CHUNK << 1); + let data_addr = + vm_layout::vm_layout().heap_start + (vm_layout::BYTES_IN_CHUNK << 1); let metadata_1_spec = SideMetadataSpec { name: "metadata_1_spec", @@ -493,8 +474,7 @@ mod tests { || { // We need to do this because of the static NO_METADATA // sanity::reset(); - let data_addr = vm_layout_constants::HEAP_START - + (vm_layout_constants::BYTES_IN_CHUNK << 2); + let data_addr = vm_layout().heap_start + (vm_layout::BYTES_IN_CHUNK << 2); #[cfg(target_pointer_width = "64")] let metadata_1_spec = SideMetadataSpec { @@ -586,7 +566,7 @@ mod tests { serial_test(|| { with_cleanup( || { - let data_addr = vm_layout_constants::HEAP_START; + let data_addr = vm_layout::vm_layout().heap_start; // 1 bit per 8 bytes let spec = SideMetadataSpec { @@ -643,7 +623,7 @@ mod tests { serial_test(|| { with_cleanup( || { - let data_addr = vm_layout_constants::HEAP_START; + let data_addr = vm_layout::vm_layout().heap_start; // 1 bit per 8 bytes let spec = SideMetadataSpec { @@ -739,7 +719,7 @@ mod tests { serial_test(|| { with_cleanup( || { - let data_addr = vm_layout_constants::HEAP_START; + let data_addr = vm_layout().heap_start; let log_num_of_bits = 0; let log_bytes_in_region = 3; diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs index 150b28dc28..bcdf678b0d 100644 --- a/src/util/metadata/side_metadata/spec_defs.rs +++ b/src/util/metadata/side_metadata/spec_defs.rs @@ -1,5 +1,5 @@ use crate::util::constants::*; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use crate::util::linear_scan::Region; use crate::util::metadata::side_metadata::constants::{ GLOBAL_SIDE_METADATA_BASE_OFFSET, LOCAL_SIDE_METADATA_BASE_OFFSET, diff --git a/src/util/mod.rs b/src/util/mod.rs index a6e3c6ea9c..59d531ad55 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -40,7 +40,7 @@ pub(crate) mod erase_vm; /// Finalization implementation. pub(crate) mod finalizable_processor; /// Heap implementation, including page resource, mmapper, etc. 
-pub(crate) mod heap; +pub mod heap; #[cfg(feature = "is_mmtk_object")] pub mod is_mmtk_object; /// Logger initialization diff --git a/src/util/options.rs b/src/util/options.rs index 0019808824..9be6d20e18 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -7,6 +7,8 @@ use std::fmt::Debug; use std::str::FromStr; use strum_macros::EnumString; +use super::heap::vm_layout::vm_layout; + #[derive(Copy, Clone, EnumString, Debug)] pub enum NurseryZeroingOptions { Temporal, @@ -93,8 +95,9 @@ pub const NURSERY_SIZE: usize = 32 << LOG_BYTES_IN_MBYTE; pub const DEFAULT_MIN_NURSERY: usize = 2 << LOG_BYTES_IN_MBYTE; /// The default max nursery size. This does not affect the actual space we create as nursery. It is /// only used in the GC trigger check. +pub const DEFAULT_MAX_NURSERY_32: usize = 32 << LOG_BYTES_IN_MBYTE; #[cfg(target_pointer_width = "32")] -pub const DEFAULT_MAX_NURSERY: usize = 32 << LOG_BYTES_IN_MBYTE; +pub const DEFAULT_MAX_NURSERY: usize = DEFAULT_MAX_NURSERY_32; fn always_valid<T>(_: &T) -> bool { true @@ -375,11 +378,11 @@ pub struct NurserySize { /// Minimum nursery size (in bytes) pub min: usize, /// Maximum nursery size (in bytes) - pub max: usize, + max: Option<usize>, } impl NurserySize { - pub fn new(kind: NurseryKind, value: usize) -> Self { + pub fn new(kind: NurseryKind, value: Option<usize>) -> Self { match kind { NurseryKind::Bounded => NurserySize { kind, @@ -388,7 +391,7 @@ impl NurserySize { }, NurseryKind::Fixed => NurserySize { kind, - min: value, + min: value.unwrap(), max: value, }, } @@ -405,7 +408,7 @@ impl NurserySize { let value = ns[1] .parse() .map_err(|_| String::from("Failed to parse size"))?; - Ok(NurserySize::new(kind, value)) + Ok(NurserySize::new(kind, Some(value))) } } @@ -420,12 +423,18 @@ impl FromStr for NurserySize { impl Options { /// Return upper bound of the nursery size (in number of bytes) pub fn get_max_nursery_bytes(&self) -> usize { - self.nursery.max + self.nursery.max.unwrap_or_else(|| { + if !vm_layout().force_use_contiguous_spaces { + DEFAULT_MAX_NURSERY_32 + } else { + DEFAULT_MAX_NURSERY + } + }) } /// Return upper bound of the nursery size (in number of pages) pub fn get_max_nursery_pages(&self) -> usize { - crate::util::conversions::bytes_to_pages_up(self.nursery.max) + crate::util::conversions::bytes_to_pages_up(self.get_max_nursery_bytes()) } /// Return lower bound of the nursery size (in number of bytes) @@ -452,6 +461,15 @@ impl GCTriggerSelector { const G: u64 = 1024 * Self::M; const T: u64 = 1024 * Self::G; + /// Get the max heap size. + pub fn max_heap_size(&self) -> usize { + match self { + Self::FixedHeapSize(s) => *s, + Self::DynamicHeapSize(_, s) => *s, + _ => unreachable!("Cannot get max heap size"), + } + } + /// Parse a size representation, which could be a number that represents bytes, /// or a number with the suffix K/k/M/m/G/g. Return the byte number if it can be /// parsed properly, otherwise return an error string. @@ -657,8 +675,8 @@ options! { // Bounded nursery only controls the upper bound, whereas the size for a Fixed nursery controls // both the upper and lower bounds.
The nursery size can be set like "Fixed:8192", for example, // to have a Fixed nursery size of 8192 bytes - nursery: NurserySize [env_var: true, command_line: true] [|v: &NurserySize| v.min > 0 && v.max > 0 && v.max >= v.min] - = NurserySize { kind: NurseryKind::Bounded, min: DEFAULT_MIN_NURSERY, max: DEFAULT_MAX_NURSERY }, + nursery: NurserySize [env_var: true, command_line: true] [|v: &NurserySize| v.min > 0 && v.max.map(|max| max > 0 && max >= v.min).unwrap_or(true)] + = NurserySize { kind: NurseryKind::Bounded, min: DEFAULT_MIN_NURSERY, max: None }, // Should a major GC be performed when a system GC is required? full_heap_system_gc: bool [env_var: true, command_line: true] [always_valid] = false, // Should we shrink/grow the heap to adjust to application working set? (not supported) @@ -686,7 +704,7 @@ options! { // The start of vmspace. vm_space_start: Address [env_var: true, command_line: true] [always_valid] = Address::ZERO, // The size of vmspace. - vm_space_size: usize [env_var: true, command_line: true] [|v: &usize| *v > 0] = usize::MAX, + vm_space_size: usize [env_var: true, command_line: true] [|v: &usize| *v > 0] = 0xdc0_0000, // Perf events to measure // Semicolons are used to separate events // Each event is in the format of event_name,pid,cpu (see man perf_event_open for what pid and cpu mean). diff --git a/src/util/test_util.rs b/src/util/test_util.rs index dff73e8461..8401d79c4d 100644 --- a/src/util/test_util.rs +++ b/src/util/test_util.rs @@ -1,5 +1,5 @@ use crate::util::address::{Address, ByteSize}; -use crate::util::heap::layout::vm_layout_constants::*; +use crate::util::heap::layout::vm_layout::*; use std::panic; use std::sync::mpsc; use std::sync::Mutex; @@ -26,10 +26,18 @@ impl MmapTestRegion { } // Make sure we use the address range before our heap start so we won't conflict with our heap range. -const_assert!( - TEST_ADDRESS.as_usize() - <= crate::util::heap::layout::vm_layout_constants::HEAP_START.as_usize() -); +#[cfg(test)] +mod test { + #[test] + fn verify_test_address() { + assert!( + super::TEST_ADDRESS.as_usize() + <= crate::util::heap::layout::vm_layout::vm_layout() + .heap_start + .as_usize() + ); + } +} // Test with an address that works for 32bits. 
#[cfg(target_os = "linux")] diff --git a/vmbindings/dummyvm/src/api.rs b/vmbindings/dummyvm/src/api.rs index 3a1b61971c..6a57295e08 100644 --- a/vmbindings/dummyvm/src/api.rs +++ b/vmbindings/dummyvm/src/api.rs @@ -7,6 +7,7 @@ use crate::SINGLETON; use libc::c_char; use mmtk::memory_manager; use mmtk::scheduler::{GCController, GCWorker}; +use mmtk::util::heap::vm_layout::VMLayout; use mmtk::util::opaque_pointer::*; use mmtk::util::{Address, ObjectReference}; use mmtk::AllocationSemantics; @@ -15,10 +16,18 @@ use std::ffi::CStr; use std::sync::atomic::Ordering; #[no_mangle] -pub extern "C" fn mmtk_init(heap_size: usize) { +pub fn mmtk_init(heap_size: usize) { + mmtk_init_with_layout(heap_size, None) +} + +#[no_mangle] +pub fn mmtk_init_with_layout(heap_size: usize, layout: Option<VMLayout>) { // set heap size first { let mut builder = BUILDER.lock().unwrap(); + if let Some(layout) = layout { + builder.set_vm_layout(layout); + } let success = builder .options diff --git a/vmbindings/dummyvm/src/tests/fixtures/mod.rs b/vmbindings/dummyvm/src/tests/fixtures/mod.rs index 0d17e38b54..ccda59970f 100644 --- a/vmbindings/dummyvm/src/tests/fixtures/mod.rs +++ b/vmbindings/dummyvm/src/tests/fixtures/mod.rs @@ -186,3 +186,35 @@ impl FixtureContent for MutatorFixture { } unsafe impl Send for MutatorFixture {} + +use mmtk::util::heap::vm_layout::VMLayout; + +pub struct VMLayoutFixture { + pub mmtk: &'static MMTK<DummyVM>, + pub mutator: *mut Mutator<DummyVM>, +} + +impl VMLayoutFixture { + pub fn create_with_layout(layout: Option<VMLayout>) -> Self { + const MB: usize = 1024 * 1024; + // 1MB heap + mmtk_init_with_layout(MB, layout); + mmtk_initialize_collection(VMThread::UNINITIALIZED); + // Make sure GC does not run during test. + mmtk_disable_collection(); + let handle = mmtk_bind_mutator(VMMutatorThread(VMThread::UNINITIALIZED)); + + VMLayoutFixture { + mmtk: &crate::SINGLETON, + mutator: handle, + } + } +} + +impl FixtureContent for VMLayoutFixture { + fn create() -> Self { + Self::create_with_layout(None::<VMLayout>) + } +} + +unsafe impl Send for VMLayoutFixture {}
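With the fixture in place, a binding-side test can bring MMTk up under a custom layout and drive allocation through the dummyvm API. The following is a minimal sketch, not part of the patch: the test name is illustrative, it assumes it runs in its own test process (as each dummyvm test does), and it simply reuses VMLayout::default() where any layout satisfying the alignment invariants would do.

use crate::api;
use crate::tests::fixtures::VMLayoutFixture;
use mmtk::util::heap::vm_layout::VMLayout;
use mmtk::AllocationSemantics;

#[test]
fn sketch_alloc_under_custom_layout() {
    // Initialize MMTk with an explicit layout (here: the default one).
    let fixture = VMLayoutFixture::create_with_layout(Some(VMLayout::default()));
    // Allocate 8 bytes at 8-byte alignment through the mutator handle.
    let addr = api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default);
    assert!(!addr.is_zero());
}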
diff --git a/vmbindings/dummyvm/src/tests/mod.rs b/vmbindings/dummyvm/src/tests/mod.rs index 25aae114d6..acc48a3eb5 100644 --- a/vmbindings/dummyvm/src/tests/mod.rs +++ b/vmbindings/dummyvm/src/tests/mod.rs @@ -25,3 +25,8 @@ mod malloc_api; #[cfg(feature = "malloc_counted_size")] mod malloc_counted; mod malloc_ms; +#[cfg(target_pointer_width = "64")] +mod vm_layout_compressed_pointer_64; +mod vm_layout_default; +mod vm_layout_heap_start; +mod vm_layout_log_address_space; diff --git a/vmbindings/dummyvm/src/tests/vm_layout_compressed_pointer_64.rs b/vmbindings/dummyvm/src/tests/vm_layout_compressed_pointer_64.rs new file mode 100644 index 0000000000..9abc782a41 --- /dev/null +++ b/vmbindings/dummyvm/src/tests/vm_layout_compressed_pointer_64.rs @@ -0,0 +1,33 @@ +// GITHUB-CI: MMTK_PLAN=all + +use mmtk::util::conversions::*; +use mmtk::util::heap::vm_layout::VMLayout; +use mmtk::util::Address; + +use crate::tests::vm_layout_default::test_with_vm_layout; + +// This test only runs on 64 bits. + +#[test] +fn test_vm_layout_compressed_pointer() { + let start = if cfg!(target_os = "macos") { + // Impossible to map 0x4000_0000 on macOS, so choose a different address. + 0x40_0000_0000 + } else { + 0x4000_0000 + }; + let heap_size = 1024 * 1024; + let end = match start + heap_size { + end if end <= (4usize << 30) => 4usize << 30, + end if end <= (32usize << 30) => 32usize << 30, + _ => start + (32usize << 30), + }; + let layout = VMLayout { + log_address_space: 35, + heap_start: chunk_align_down(unsafe { Address::from_usize(start) }), + heap_end: chunk_align_up(unsafe { Address::from_usize(end) }), + log_space_extent: 31, + force_use_contiguous_spaces: false, + }; + test_with_vm_layout(Some(layout)); +} diff --git a/vmbindings/dummyvm/src/tests/vm_layout_default.rs b/vmbindings/dummyvm/src/tests/vm_layout_default.rs new file mode 100644 index 0000000000..9d95c314ca --- /dev/null +++ b/vmbindings/dummyvm/src/tests/vm_layout_default.rs @@ -0,0 +1,25 @@ +// GITHUB-CI: MMTK_PLAN=all + +use mmtk::util::heap::vm_layout::VMLayout; + +pub fn test_with_vm_layout(layout: Option<VMLayout>) { + use crate::api; + use crate::tests::fixtures::VMLayoutFixture; + use mmtk::plan::AllocationSemantics; + use mmtk::vm::ObjectModel; + + let fixture = VMLayoutFixture::create_with_layout(layout); + + // Test allocation + let addr = api::mmtk_alloc(fixture.mutator, 8, 8, 0, AllocationSemantics::Default); + let obj = crate::object_model::VMObjectModel::address_to_ref(addr); + // Test SFT + assert!(api::mmtk_is_in_mmtk_spaces(obj)); + // Test mmapper + assert!(api::mmtk_is_mapped_address(addr)); +} + +#[test] +fn test_vm_layout_default() { + test_with_vm_layout(None::<VMLayout>); +} diff --git a/vmbindings/dummyvm/src/tests/vm_layout_heap_start.rs b/vmbindings/dummyvm/src/tests/vm_layout_heap_start.rs new file mode 100644 index 0000000000..ec64a4413e --- /dev/null +++ b/vmbindings/dummyvm/src/tests/vm_layout_heap_start.rs @@ -0,0 +1,25 @@ +// GITHUB-CI: MMTK_PLAN=all + +use crate::tests::vm_layout_default::test_with_vm_layout; +use mmtk::util::heap::vm_layout::VMLayout; +use mmtk::util::Address; + +#[test] +fn test_vm_layout_heap_start() { + let default = VMLayout::default(); + + // Test with a start address that is different from the default heap start. + #[cfg(target_pointer_width = "32")] + let heap_start = unsafe { Address::from_usize(0x7000_0000) }; + #[cfg(target_pointer_width = "64")] + let heap_start = unsafe { Address::from_usize(0x0000_0400_0000_0000usize) }; + #[cfg(target_pointer_width = "64")] + assert!(heap_start.is_aligned_to(default.max_space_extent())); + + let layout = VMLayout { + heap_start, + // Use default for the rest. + ..default + }; + test_with_vm_layout(Some(layout)); +} diff --git a/vmbindings/dummyvm/src/tests/vm_layout_log_address_space.rs b/vmbindings/dummyvm/src/tests/vm_layout_log_address_space.rs new file mode 100644 index 0000000000..504e66b30f --- /dev/null +++ b/vmbindings/dummyvm/src/tests/vm_layout_log_address_space.rs @@ -0,0 +1,17 @@ +// GITHUB-CI: MMTK_PLAN=all + +use crate::tests::vm_layout_default::test_with_vm_layout; +use mmtk::util::heap::vm_layout::VMLayout; + +#[test] +fn test_vm_layout_log_address_space() { + let layout = VMLayout { + #[cfg(target_pointer_width = "32")] + log_address_space: 31, + #[cfg(target_pointer_width = "64")] + log_address_space: 45, + // Use default for the rest. + ..VMLayout::default() + }; + test_with_vm_layout(Some(layout)); +}
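The boundary values in vm_layout_compressed_pointer_64.rs follow the usual compressed-pointer arithmetic: a 32-bit reference spans 2^32 bytes (4 GiB) unshifted, and 2^35 bytes (32 GiB) once scaled by a shift of 3, which appears to be where log_address_space: 35 and the 4 GiB / 32 GiB heap_end tiers come from. A standalone sketch of that arithmetic, assuming 8-byte object alignment (the patch does not state the shift explicitly):

fn main() {
    // A 32-bit compressed reference addresses 2^32 bytes directly...
    assert_eq!(1usize << 32, 4usize << 30); // 4 GiB
    // ...and 2^(32 + 3) bytes when scaled by an 8-byte (2^3) alignment.
    assert_eq!(1usize << (32 + 3), 32usize << 30); // 32 GiB
    // Hence log_address_space: 35 in the test's VMLayout.
    assert_eq!(32 + 3, 35);
}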