From 3c6fad44f956789521ac66ceb2c972b500fc19c6 Mon Sep 17 00:00:00 2001
From: Joe Richey
Date: Fri, 28 Feb 2020 15:06:05 -0800
Subject: [PATCH] paging: Use dedicated paging::Manager

Like PortIO, the page tables are a fundamentally global structure. By
moving the paging logic to a separate file, the requirement for
exclusive access is now correctly modeled with Rust types.
---
 layout.ld     |  2 +-
 src/main.rs   | 29 ++--------------------
 src/paging.rs | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 71 insertions(+), 28 deletions(-)
 create mode 100644 src/paging.rs

diff --git a/layout.ld b/layout.ld
index 43da9791..af87cf45 100644
--- a/layout.ld
+++ b/layout.ld
@@ -35,7 +35,7 @@ SECTIONS
     /* Memory for identity mapping, keep synced with ADDRESS_SPACE_GIB */
     address_space_gib = 4;
     . = ALIGN(4K);
-    pml2t = .;
+    pml2ts = .;
     . += 4K * address_space_gib;
 
     ASSERT((. <= ram_max - stack_size), "firmware size too big for RAM region")
diff --git a/src/main.rs b/src/main.rs
index b4a2e185..2c6b3766 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -37,6 +37,7 @@ mod efi;
 mod fat;
 mod loader;
 mod mem;
+mod paging;
 mod part;
 mod pci;
 mod pe;
@@ -60,32 +61,6 @@ fn panic(_: &PanicInfo) -> ! {
     loop {}
 }
 
-/// Setup page tables to provide an identity mapping over the full 4GiB range
-fn setup_pagetables() {
-    type PageTable = [u64; 512];
-
-    extern "C" {
-        static pml3t: PageTable;
-        static pml2t: PageTable;
-        static address_space_gib: u8;
-    }
-    let num_gib = unsafe { &address_space_gib } as *const _ as usize as u64;
-    log!("Setting up {} GiB identity mapping", num_gib);
-
-    let pml2t_addr = unsafe { pml2t.as_ptr() } as usize as u64;
-    let pte = mem::MemoryRegion::new(pml2t_addr, num_gib * 4096);
-    for i in 0..(512 * num_gib) {
-        pte.io_write_u64(i * 8, (i << 21) + 0x83u64)
-    }
-
-    let pde = mem::MemoryRegion::from_slice(unsafe { &pml3t });
-    for i in 0..num_gib {
-        pde.io_write_u64(i * 8, (pml2t_addr + (0x1000u64 * i)) | 0x03);
-    }
-
-    log!("Page tables setup");
-}
-
 // Enable SSE2 for XMM registers (needed for EFI calling)
 fn enable_sse() {
     let mut cr0 = Cr0::read();
@@ -182,7 +157,7 @@ fn boot_from_device(device: &mut block::VirtioBlockDevice) -> bool {
 pub extern "C" fn rust64_start() -> ! {
     log!("\nStarting..");
     enable_sse();
-    setup_pagetables();
+    paging::MANAGER.borrow_mut().setup();
 
     pci::print_bus();
 
diff --git a/src/paging.rs b/src/paging.rs
new file mode 100644
index 00000000..b10a5fda
--- /dev/null
+++ b/src/paging.rs
@@ -0,0 +1,68 @@
+use atomic_refcell::AtomicRefCell;
+use x86_64::{
+    registers::control::Cr3,
+    structures::paging::{PageSize, PageTable, PageTableFlags, PhysFrame, Size2MiB},
+    PhysAddr,
+};
+
+// Keep in sync with address_space_gib in layout.ld
+const ADDRESS_SPACE_GIB: usize = 4;
+
+pub static MANAGER: AtomicRefCell<Manager> = AtomicRefCell::new(Manager);
+pub struct Manager;
+
+extern "C" {
+    static mut pml4t: PageTable;
+    static mut pml3t: PageTable;
+    static mut pml2ts: [PageTable; ADDRESS_SPACE_GIB];
+}
+
+struct Tables<'a> {
+    l4: &'a mut PageTable,
+    l3: &'a mut PageTable,
+    l2s: &'a mut [PageTable],
+}
+
+impl Manager {
+    fn tables(&mut self) -> Tables {
+        Tables {
+            l4: unsafe { &mut pml4t },
+            l3: unsafe { &mut pml3t },
+            l2s: unsafe { &mut pml2ts },
+        }
+    }
+
+    pub fn setup(&mut self) {
+        log!("Setting up {} GiB identity mapping", ADDRESS_SPACE_GIB);
+        let Tables { l4, l3, l2s } = self.tables();
+
+        let pt_flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
+        // Setup Identity map using L2 huge pages
+        let mut next_addr = PhysAddr::new(0);
+        for l2 in l2s.iter_mut() {
+            for l2e in l2.iter_mut() {
+                l2e.set_addr(next_addr, pt_flags | PageTableFlags::HUGE_PAGE);
+                next_addr += Size2MiB::SIZE;
+            }
+        }
+
+        // Point L3 at L2s
+        for (i, l2) in l2s.iter().enumerate() {
+            l3[i].set_addr(phys_addr(l2), pt_flags);
+        }
+
+        // Point L4 at L3
+        l4[0].set_addr(phys_addr(l3), pt_flags);
+
+        // Point Cr3 at PML4
+        let cr3_flags = Cr3::read().1;
+        let pml4t_frame = PhysFrame::from_start_address(phys_addr(l4)).unwrap();
+        unsafe { Cr3::write(pml4t_frame, cr3_flags) };
+        log!("Page tables setup");
+    }
+}
+
+// Map a virtual address to a PhysAddr (assumes identity mapping)
+fn phys_addr<T>(virt_addr: *const T) -> PhysAddr {
+    PhysAddr::new(virt_addr as u64)
+}
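
Illustration (not part of the patch): a minimal, self-contained sketch of the
exclusive-access model the commit message describes. It assumes only the
atomic_refcell crate; the Manager and MANAGER names mirror the patch, and
main() merely stands in for the firmware's rust64_start() caller.

    use atomic_refcell::AtomicRefCell;

    // Zero-sized token representing the right to touch the global page tables.
    struct Manager;

    impl Manager {
        fn setup(&mut self) {
            // Mutating the (notionally global) paging state requires `&mut self`,
            // which callers can only obtain through the cell below.
        }
    }

    // The single global handle; `AtomicRefCell::new` is const, so it can
    // initialize a static, just as paging.rs does above.
    static MANAGER: AtomicRefCell<Manager> = AtomicRefCell::new(Manager);

    fn main() {
        // `borrow_mut` hands out at most one mutable borrow at a time; an
        // overlapping borrow panics instead of silently racing on the tables.
        MANAGER.borrow_mut().setup();
    }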