From 62f1dc9c0165e667e28f2690d56fbcc8e50e03ca Mon Sep 17 00:00:00 2001 From: Wenyu Zhao Date: Thu, 7 Sep 2023 16:00:17 +1000 Subject: [PATCH] Discontiguous PageProtect GC support (#946) This CL fixes PageProtect's memory unprotect calls. When using discontiguous space, * If the allocated pages are not in a new chunk, unprotect the pages as before. * If the pages are in a new chunk, there can be two cases: * If the new chunk is not mapped, no need to unprotect * If the new chunk is mapped, this means that the chunk was allocated and released in previous GC epochs. The GC should unprotect these pages. --- src/util/heap/freelistpageresource.rs | 35 +++++++++++++++++---------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 5552c9a01c..756ccfda03 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -147,19 +147,28 @@ impl PageResource for FreeListPageResource { let rtn = self.start + conversions::pages_to_bytes(page_offset as _); // The meta-data portion of reserved Pages was committed above. self.commit_pages(reserved_pages, required_pages, tls); - if self.protect_memory_on_release && !new_chunk { - // This check is necessary to prevent us from mprotecting an address that is not yet mapped by mmapper. - // See https://github.com/mmtk/mmtk-core/issues/400. - // It is possible that one thread gets a new chunk, and returns from this function. However, the Space.acquire() - // has not yet call ensure_mapped() for it. So the chunk is not yet mmapped. At this point, if another thread calls - // this function, and get a few more pages from the same chunk, it is no longer seen as 'new_chunk', and we - // will try to munprotect on it. But the chunk may not yet be mapped. - // - // If we want to improve and get rid of this loop, we need to move this munprotect to anywhere after the ensure_mapped() call - // in Space.acquire(). 
We can either move it the option of 'protect_on_release' to space, or have a call to page resource - after ensure_mapped(). However, I think this is sufficient given that this option is only used for PageProtect for debugging use. - while !MMAPPER.is_mapped_address(rtn) {} - self.munprotect(rtn, self.free_list.size(page_offset as _) as _) + if self.protect_memory_on_release { + if !new_chunk { + // This check is necessary to prevent us from mprotecting an address that is not yet mapped by mmapper. + // See https://github.com/mmtk/mmtk-core/issues/400. + // It is possible that one thread gets a new chunk, and returns from this function. However, the Space.acquire() + // has not yet called ensure_mapped() for it. So the chunk is not yet mmapped. At this point, if another thread calls + // this function, and gets a few more pages from the same chunk, it is no longer seen as 'new_chunk', and we + // will try to munprotect on it. But the chunk may not yet be mapped. + // + // If we want to improve and get rid of this loop, we need to move this munprotect to anywhere after the ensure_mapped() call + // in Space.acquire(). We can either move the option of 'protect_on_release' to space, or have a call to page resource + // after ensure_mapped(). However, I think this is sufficient given that this option is only used for PageProtect for debugging use. + while !new_chunk && !MMAPPER.is_mapped_address(rtn) {} + self.munprotect(rtn, self.free_list.size(page_offset as _) as _) + } else if !self.common().contiguous && new_chunk { + // Don't unprotect if this is a new unmapped discontiguous chunk + // For a new mapped discontiguous chunk, it must have been previously released and protected by us. + // We still need to unprotect it. + if MMAPPER.is_mapped_address(rtn) { + self.munprotect(rtn, self.free_list.size(page_offset as _) as _) + } + } }; Result::Ok(PRAllocResult { start: rtn,