Introduce VMSpace, and allow VMSpace to be set lazily (#802)
This PR allows users to set the start address and size for the VM space, and
also allows the VM space to be initialised lazily. A usage sketch follows the
change list below.

* Add `VMSpace`, which wraps the normal `ImmortalSpace`. The policy is
mostly the same as `ImmortalSpace`, except that the space may be
uninitialized and can be initialized later.
* Add the options `vm_space_start` and `vm_space_size`.
* Modify the heap start constant for 32-bit builds so that JikesRVM's VM space
does not overlap with our heap range.
* Fix a subtraction overflow issue in `align_allocation`.
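For concreteness, here is a minimal sketch of how a binding might use the two paths. The option names `vm_space_start`/`vm_space_size` and the `lazy_init_vm_space` entry point come from this PR; the `set_option` calls, the address values, and the surrounding glue are illustrative assumptions, not code from the PR (both paths require the `vm_space` feature).

```rust
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::{memory_manager, MMTKBuilder, MMTK};

// Eager path: the runtime already knows where its boot image is mapped,
// so it sets the VM space bounds as options before creating MMTk.
fn init_eager<VM: VMBinding>(builder: &mut MMTKBuilder) -> Box<MMTK<VM>> {
    builder.set_option("vm_space_start", "0x60000000"); // illustrative address
    builder.set_option("vm_space_size", "0x800000"); // 8 MiB, illustrative
    memory_manager::mmtk_init(builder)
}

// Lazy path: create MMTk first, and supply the VM space range later,
// once the runtime has actually mapped its boot image.
fn init_lazy<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
    memory_manager::lazy_init_vm_space(mmtk, start, size);
}
```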
qinsoon authored May 8, 2023
1 parent ed347f1 commit e309f93
Showing 25 changed files with 392 additions and 71 deletions.
1 change: 1 addition & 0 deletions Cargo.toml
@@ -45,6 +45,7 @@ itertools = "0.10.5"
sys-info = "0.9"
regex = "1.7.0"
static_assertions = "1.1.0"
delegate = "0.9.0"

[dev-dependencies]
rand = "0.8.5"
4 changes: 4 additions & 0 deletions docs/tutorial/code/mygc_semispace/global.rs
@@ -154,6 +154,10 @@ impl<VM: VMBinding> Plan for MyGC<VM> {
fn base(&self) -> &BasePlan<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}
// ANCHOR_END: plan_base

// Add
5 changes: 5 additions & 0 deletions src/memory_manager.rs
@@ -85,6 +85,11 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
Box::new(mmtk)
}

#[cfg(feature = "vm_space")]
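/// Lazily initialize the VM space: set the start address and size of a VM space that was
/// created without a known range. Only available with the `vm_space` feature.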
pub fn lazy_init_vm_space<VM: VMBinding>(mmtk: &'static mut MMTK<VM>, start: Address, size: usize) {
mmtk.plan.base_mut().vm_space.lazy_initialize(start, size);
}

/// Request MMTk to create a mutator for the given thread. The ownership
/// of returned boxed mutator is transferred to the binding, and the binding needs to take care of its
/// lifetime. For performance reasons, A VM should store the returned mutator in a thread local storage
4 changes: 4 additions & 0 deletions src/plan/generational/copying/global.rs
@@ -148,6 +148,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
&self.gen.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.gen.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.gen.common
}
4 changes: 4 additions & 0 deletions src/plan/generational/immix/global.rs
@@ -178,6 +178,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
&self.gen.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.gen.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.gen.common
}
29 changes: 5 additions & 24 deletions src/plan/global.rs
@@ -8,6 +8,8 @@ use crate::plan::Mutator;
use crate::policy::immortalspace::ImmortalSpace;
use crate::policy::largeobjectspace::LargeObjectSpace;
use crate::policy::space::{PlanCreateSpaceArgs, Space};
#[cfg(feature = "vm_space")]
use crate::policy::vmspace::VMSpace;
use crate::scheduler::*;
use crate::util::alloc::allocators::AllocatorSelector;
#[cfg(feature = "analysis")]
@@ -176,6 +178,7 @@ pub trait Plan: 'static + Sync + Downcast {
}

fn base(&self) -> &BasePlan<Self::VM>;
fn base_mut(&mut self) -> &mut BasePlan<Self::VM>;
fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler<Self::VM>);
fn common(&self) -> &CommonPlan<Self::VM> {
panic!("Common Plan not handled!")
Expand Down Expand Up @@ -423,29 +426,7 @@ pub struct BasePlan<VM: VMBinding> {
/// the VM space.
#[cfg(feature = "vm_space")]
#[trace]
pub vm_space: ImmortalSpace<VM>,
}

#[cfg(feature = "vm_space")]
pub fn create_vm_space<VM: VMBinding>(args: &mut CreateSpecificPlanArgs<VM>) -> ImmortalSpace<VM> {
use crate::util::constants::LOG_BYTES_IN_MBYTE;
let boot_segment_bytes = *args.global_args.options.vm_space_size;
debug_assert!(boot_segment_bytes > 0);

use crate::util::conversions::raw_align_up;
use crate::util::heap::layout::vm_layout_constants::BYTES_IN_CHUNK;
let boot_segment_mb = raw_align_up(boot_segment_bytes, BYTES_IN_CHUNK) >> LOG_BYTES_IN_MBYTE;

let space = ImmortalSpace::new_vm_space(args.get_space_args(
"boot",
false,
VMRequest::fixed_size(boot_segment_mb),
));

// The space is mapped externally by the VM. We need to update our mmapper to mark the range as mapped.
space.ensure_mapped();

space
pub vm_space: VMSpace<VM>,
}

/// Args needed for creating any plan. This includes a set of contexts from MMTK or global. This
Expand Down Expand Up @@ -520,7 +501,7 @@ impl<VM: VMBinding> BasePlan<VM> {
VMRequest::discontiguous(),
)),
#[cfg(feature = "vm_space")]
vm_space: create_vm_space(&mut args),
vm_space: VMSpace::new(&mut args),

initialized: AtomicBool::new(false),
trigger_gc_when_heap_is_full: AtomicBool::new(true),
Expand Down
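The new `src/policy/vmspace.rs` file itself is not among the hunks shown on this page. As a rough sketch of the wrapper the description implies, an `ImmortalSpace` that may start out uninitialized, with calls forwarded via the newly added `delegate` crate, it could look something like the following. The field name, the `Option` representation, and the delegated method are assumptions for illustration, not the committed code.

```rust
use crate::policy::immortalspace::ImmortalSpace;
use crate::util::Address;
use crate::vm::VMBinding;
use delegate::delegate;

/// Sketch only: an ImmortalSpace wrapper that may start out uninitialized.
pub struct VMSpace<VM: VMBinding> {
    // None until the binding provides a range, either through the
    // vm_space_start/vm_space_size options or lazy_initialize().
    inner: Option<ImmortalSpace<VM>>,
}

impl<VM: VMBinding> VMSpace<VM> {
    /// Set the range of the VM space after creation (illustrative body).
    pub fn lazy_initialize(&mut self, start: Address, size: usize) {
        assert!(self.inner.is_none(), "VM space is already initialized");
        // Construct the underlying immortal space over the externally
        // mapped range, then mark that range as mapped in the mmapper
        // (construction details elided in this sketch).
        let _ = (start, size);
    }

    // Panicking on an uninitialized VM space makes misuse easy to spot.
    fn space(&self) -> &ImmortalSpace<VM> {
        self.inner.as_ref().expect("VM space is not initialized")
    }

    delegate! {
        // Forward queries to the inner immortal space once it exists.
        to self.space() {
            pub fn reserved_pages(&self) -> usize;
        }
    }
}
```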
4 changes: 4 additions & 0 deletions src/plan/immix/global.rs
@@ -117,6 +117,10 @@ impl<VM: VMBinding> Plan for Immix<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/markcompact/global.rs
@@ -61,6 +61,10 @@ impl<VM: VMBinding> Plan for MarkCompact<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/marksweep/global.rs
@@ -87,6 +87,10 @@ impl<VM: VMBinding> Plan for MarkSweep<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
2 changes: 2 additions & 0 deletions src/plan/mod.rs
@@ -26,6 +26,8 @@ pub use global::AllocationSemantics;
pub(crate) use global::GcStatus;
pub use global::Plan;
pub(crate) use global::PlanTraceObject;
#[cfg(feature = "vm_space")] // This is used for creating VM space
pub(crate) use global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs};

mod mutator_context;
pub use mutator_context::Mutator;
4 changes: 4 additions & 0 deletions src/plan/nogc/global.rs
@@ -53,6 +53,10 @@ impl<VM: VMBinding> Plan for NoGC<VM> {
&self.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.base
}

fn prepare(&mut self, _tls: VMWorkerThread) {
unreachable!()
}
4 changes: 4 additions & 0 deletions src/plan/pageprotect/global.rs
@@ -77,6 +77,10 @@ impl<VM: VMBinding> Plan for PageProtect<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/semispace/global.rs
@@ -129,6 +129,10 @@ impl<VM: VMBinding> Plan for SemiSpace<VM> {
&self.common.base
}

fn base_mut(&mut self) -> &mut BasePlan<Self::VM> {
&mut self.common.base
}

fn common(&self) -> &CommonPlan<VM> {
&self.common
}
4 changes: 4 additions & 0 deletions src/plan/sticky/immix/global.rs
@@ -67,6 +67,10 @@ impl<VM: VMBinding> Plan for StickyImmix<VM> {
self.immix.base()
}

fn base_mut(&mut self) -> &mut crate::plan::global::BasePlan<Self::VM> {
self.immix.base_mut()
}

fn generational(
&self,
) -> Option<&dyn crate::plan::generational::global::GenerationalPlan<VM = Self::VM>> {
30 changes: 20 additions & 10 deletions src/policy/immortalspace.rs
@@ -120,15 +120,6 @@ impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for ImmortalSpace<VM>
 
 impl<VM: VMBinding> ImmortalSpace<VM> {
     pub fn new(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
-        Self::new_inner(args, false)
-    }
-
-    #[cfg(feature = "vm_space")]
-    pub fn new_vm_space(args: crate::policy::space::PlanCreateSpaceArgs<VM>) -> Self {
-        Self::new_inner(args, true)
-    }
-
-    pub fn new_inner(args: crate::policy::space::PlanCreateSpaceArgs<VM>, vm_space: bool) -> Self {
         let vm_map = args.vm_map;
         let is_discontiguous = args.vmrequest.is_discontiguous();
         let common = CommonSpace::new(args.into_policy_args(
@@ -144,7 +135,26 @@ impl<VM: VMBinding> ImmortalSpace<VM> {
                 MonotonePageResource::new_contiguous(common.start, common.extent, vm_map)
             },
             common,
-            vm_space,
+            vm_space: false,
         }
     }
 
+    #[cfg(feature = "vm_space")]
+    pub fn new_vm_space(
+        args: crate::policy::space::PlanCreateSpaceArgs<VM>,
+        start: Address,
+        size: usize,
+    ) -> Self {
+        assert!(!args.vmrequest.is_discontiguous());
+        ImmortalSpace {
+            mark_state: MarkState::new(),
+            pr: MonotonePageResource::new_contiguous(start, size, args.vm_map),
+            common: CommonSpace::new(args.into_policy_args(
+                false,
+                true,
+                metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]),
+            )),
+            vm_space: true,
+        }
+    }
2 changes: 2 additions & 0 deletions src/policy/mod.rs
@@ -27,3 +27,5 @@ pub mod largeobjectspace;
pub mod lockfreeimmortalspace;
pub mod markcompactspace;
pub mod marksweepspace;
#[cfg(feature = "vm_space")]
pub mod vmspace;
12 changes: 9 additions & 3 deletions src/policy/sft_map.rs
@@ -120,8 +120,13 @@ mod space_map {
             assert!(old.name() == EMPTY_SFT_NAME || old.name() == space.name());
             // Make sure the range is in the space
             let space_start = Self::index_to_space_start(index);
-            assert!(start >= space_start);
-            assert!(start + bytes <= space_start + MAX_SPACE_EXTENT);
+            // FIXME: Currently we skip this check for the last space. It works fine for MMTk's internal
+            // spaces, but the VM space is an exception: our indexing function treats any address after
+            // the last space as the last space, so we cannot assume the region ends within MAX_SPACE_EXTENT.
+            if index != Self::TABLE_SIZE - 1 {
+                assert!(start >= space_start);
+                assert!(start + bytes <= space_start + MAX_SPACE_EXTENT);
+            }
         }
         *mut_self.sft.get_unchecked_mut(index) = space;
     }
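To see why the last table entry needs this exemption, consider an index function that clamps high addresses into the table, which is the behaviour the comment above describes. The names and constants below are illustrative, not the actual `sft_map` code:

```rust
// Illustrative only (assumes a 64-bit usize). If every address at or beyond
// the start of the last entry maps to the last index, the "space" at that
// index has no upper bound, so a bound of MAX_SPACE_EXTENT on the region's
// end can legitimately fail for a VM space placed high in the address range.
const TABLE_SIZE: usize = 32;
const LOG_SPACE_EXTENT: usize = 41; // 2 TiB per entry, illustrative

fn address_to_index(addr: usize) -> usize {
    std::cmp::min(addr >> LOG_SPACE_EXTENT, TABLE_SIZE - 1)
}

fn main() {
    // Both of these map to the last entry, however far apart they are.
    assert_eq!(address_to_index(31 << LOG_SPACE_EXTENT), TABLE_SIZE - 1);
    assert_eq!(address_to_index(usize::MAX), TABLE_SIZE - 1);
}
```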
@@ -512,8 +517,9 @@ mod sparse_chunk_map {
             // in which case, we still set SFT map again.
             debug_assert!(
                 old == EMPTY_SFT_NAME || new == EMPTY_SFT_NAME || old == new,
-                "attempt to overwrite a non-empty chunk {} in SFT map (from {} to {})",
+                "attempt to overwrite a non-empty chunk {} ({}) in SFT map (from {} to {})",
                 chunk,
+                crate::util::conversions::chunk_index_to_address(chunk),
                 old,
                 new
             );
23 changes: 18 additions & 5 deletions src/policy/space.rs
@@ -490,10 +490,8 @@ impl<VM: VMBinding> CommonSpace<VM> {
                 top: _top,
             } => (_extent, _top),
             VMRequest::Fixed {
-                extent: _extent,
-                top: _top,
-                ..
-            } => (_extent, _top),
+                extent: _extent, ..
+            } => (_extent, false),
             _ => unreachable!(),
         };

@@ -524,7 +522,22 @@ impl<VM: VMBinding> CommonSpace<VM> {
         // FIXME
         rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent);
         // VM.memory.setHeapRange(index, start, start.plus(extent));
-        args.plan_args.vm_map.insert(start, extent, rtn.descriptor);
+
+        // We only insert the space range into our vm map if it falls within our available heap
+        // range. Normal spaces are always within the heap range, but for the VM space a runtime
+        // could give us an arbitrary range, so we only insert the part that overlaps with our heap.
+        {
+            use crate::util::heap::layout;
+            let overlap =
+                Address::range_intersection(&(start..start + extent), &layout::available_range());
+            if !overlap.is_empty() {
+                args.plan_args.vm_map.insert(
+                    overlap.start,
+                    overlap.end - overlap.start,
+                    rtn.descriptor,
+                );
+            }
+        }
 
         // For contiguous space, we know its address range so we reserve metadata memory for its range.
         if rtn
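As a usage note, `Address::range_intersection` above yields the overlapping sub-range of two address ranges (empty when they are disjoint). The effect is the same as this plain-`usize` sketch; the helper below is an illustration, not MMTk's implementation:

```rust
use std::ops::Range;

// Illustrative stand-in for Address::range_intersection, using usize.
fn range_intersection(a: &Range<usize>, b: &Range<usize>) -> Range<usize> {
    let start = a.start.max(b.start);
    let end = a.end.min(b.end);
    // Clamp so that disjoint inputs produce an empty range.
    start..end.max(start)
}

fn main() {
    let space = 0x1000..0x5000; // a VM space range supplied by the runtime
    let heap = 0x3000..0x9000; // MMTk's available heap range
    let overlap = range_intersection(&space, &heap);
    assert_eq!(overlap, 0x3000..0x5000);
    assert!(range_intersection(&(0..1), &(2..3)).is_empty());
    // Only the overlapping part would be inserted into the vm map.
}
```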