From cd4d3c2f895a0282ec920b2b3be051e22ed87fca Mon Sep 17 00:00:00 2001
From: Tom Dohrmann
Date: Sun, 5 Sep 2021 13:27:56 +0200
Subject: [PATCH] Add `clean_up` and `clean_up_with_filter` (#264)

---
 src/addr.rs                                |  7 ++
 src/instructions/tlb.rs                    |  1 +
 src/registers/control.rs                   |  4 +
 src/structures/idt.rs                      |  1 +
 .../paging/mapper/mapped_page_table.rs     | 90 ++++++++++++++++-
 src/structures/paging/mapper/mod.rs        | 48 ++++++++-
 .../paging/mapper/offset_page_table.rs     | 24 ++++-
 .../paging/mapper/recursive_page_table.rs  | 97 ++++++++++++++++++-
 src/structures/paging/page.rs              | 13 +++
 src/structures/paging/page_table.rs        | 35 +++++++
 10 files changed, 311 insertions(+), 9 deletions(-)

diff --git a/src/addr.rs b/src/addr.rs
index 1c93f0219..4c962e2b4 100644
--- a/src/addr.rs
+++ b/src/addr.rs
@@ -3,6 +3,7 @@
 use core::fmt;
 use core::ops::{Add, AddAssign, Sub, SubAssign};
 
+use crate::structures::paging::page_table::PageTableLevel;
 use crate::structures::paging::{PageOffset, PageTableIndex};
 use bit_field::BitField;
 
@@ -198,6 +199,12 @@ impl VirtAddr {
     pub const fn p4_index(self) -> PageTableIndex {
         PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16)
     }
+
+    /// Returns the 9-bit page table index for the given page table level.
+    #[inline]
+    pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
+        PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16)
+    }
 }
 
 impl fmt::Debug for VirtAddr {
diff --git a/src/instructions/tlb.rs b/src/instructions/tlb.rs
index ea980863d..6e93f905d 100644
--- a/src/instructions/tlb.rs
+++ b/src/instructions/tlb.rs
@@ -72,6 +72,7 @@ impl Pcid {
 /// Invalidate the given address in the TLB using the `invpcid` instruction.
 ///
 /// ## Safety
+///
 /// This function is unsafe as it requires CPUID.(EAX=07H, ECX=0H):EBX.INVPCID to be 1.
 #[inline]
 pub unsafe fn flush_pcid(command: InvPicdCommand) {
diff --git a/src/registers/control.rs b/src/registers/control.rs
index 904be4100..0c8c81e12 100644
--- a/src/registers/control.rs
+++ b/src/registers/control.rs
@@ -301,6 +301,7 @@ mod x86_64 {
         /// Write a new P4 table address into the CR3 register.
         ///
         /// ## Safety
+        ///
         /// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
         /// changing the page mapping.
         #[inline]
@@ -311,6 +312,7 @@ mod x86_64 {
         /// Write a new P4 table address into the CR3 register.
         ///
         /// ## Safety
+        ///
         /// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
         /// changing the page mapping.
         /// [`Cr4Flags::PCID`] must be set before calling this method.
@@ -322,6 +324,7 @@ mod x86_64 {
         /// Write a new P4 table address into the CR3 register.
         ///
         /// ## Safety
+        ///
         /// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
         /// changing the page mapping.
         #[inline]
@@ -400,6 +403,7 @@ mod x86_64 {
         /// Updates CR4 flags.
         ///
         /// Preserves the value of reserved fields.
+        ///
         /// ## Safety
         ///
         /// This function is unsafe because it's possible to violate memory
diff --git a/src/structures/idt.rs b/src/structures/idt.rs
index f65b91721..d427a76ed 100644
--- a/src/structures/idt.rs
+++ b/src/structures/idt.rs
@@ -789,6 +789,7 @@ impl EntryOptions {
     /// This function panics if the index is not in the range 0..7.
     ///
     /// ## Safety
+    ///
     /// This function is unsafe because the caller must ensure that the passed stack index is
     /// valid and not used by other interrupts. Otherwise, memory safety violations are possible.
     #[inline]
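A quick illustration of the new `VirtAddr::page_table_index` helper added in `src/addr.rs` above (a standalone sketch, not part of the patch; it assumes a crate build with this change applied): the generic helper computes the same indices as the existing fixed-level accessors.

```rust
use x86_64::structures::paging::page_table::PageTableLevel;
use x86_64::VirtAddr;

fn main() {
    let addr = VirtAddr::new(0xdead_beef_000);

    // Level 4 reads bits 39..48 of the address, level 3 bits 30..39,
    // level 2 bits 21..30, and level 1 bits 12..21; the helper derives
    // the shift amount from the level instead of hard-coding it.
    assert_eq!(addr.page_table_index(PageTableLevel::Four), addr.p4_index());
    assert_eq!(addr.page_table_index(PageTableLevel::Three), addr.p3_index());
    assert_eq!(addr.page_table_index(PageTableLevel::Two), addr.p2_index());
    assert_eq!(addr.page_table_index(PageTableLevel::One), addr.p1_index());
}
```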
diff --git a/src/structures/paging/mapper/mapped_page_table.rs b/src/structures/paging/mapper/mapped_page_table.rs
index 7ae9e2c83..452e38cf7 100644
--- a/src/structures/paging/mapper/mapped_page_table.rs
+++ b/src/structures/paging/mapper/mapped_page_table.rs
@@ -1,9 +1,9 @@
 use crate::structures::paging::{
     frame::PhysFrame,
-    frame_alloc::FrameAllocator,
+    frame_alloc::{FrameAllocator, FrameDeallocator},
     mapper::*,
-    page::{AddressNotAligned, Page, Size1GiB, Size2MiB, Size4KiB},
-    page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
+    page::{AddressNotAligned, Page, PageRangeInclusive, Size1GiB, Size2MiB, Size4KiB},
+    page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags, PageTableLevel},
 };
 
 /// A Mapper implementation that relies on a PhysAddr to VirtAddr conversion function.
@@ -584,6 +584,90 @@ impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
     }
 }
 
+impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
+    #[inline]
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        self.clean_up_addr_range(
+            PageRangeInclusive {
+                start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
+                end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
+            },
+            frame_deallocator,
+        )
+    }
+
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        unsafe fn clean_up<P: PageTableFrameMapping>(
+            page_table: &mut PageTable,
+            page_table_walker: &PageTableWalker<P>,
+            level: PageTableLevel,
+            range: PageRangeInclusive,
+            frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
+        ) -> bool {
+            if range.is_empty() {
+                return false;
+            }
+
+            let table_addr = range
+                .start
+                .start_address()
+                .align_down(level.table_address_space_alignment());
+
+            let start = range.start.page_table_index(level);
+            let end = range.end.page_table_index(level);
+
+            if let Some(next_level) = level.next_lower_level() {
+                let offset_per_entry = level.entry_address_space_alignment();
+                for (i, entry) in page_table
+                    .iter_mut()
+                    .enumerate()
+                    .take(usize::from(end) + 1)
+                    .skip(usize::from(start))
+                {
+                    if let Ok(page_table) = page_table_walker.next_table_mut(entry) {
+                        let start = table_addr + (offset_per_entry * (i as u64));
+                        let end = start + (offset_per_entry - 1);
+                        let start = Page::<Size4KiB>::containing_address(start);
+                        let start = start.max(range.start);
+                        let end = Page::<Size4KiB>::containing_address(end);
+                        let end = end.min(range.end);
+                        if clean_up(
+                            page_table,
+                            page_table_walker,
+                            next_level,
+                            Page::range_inclusive(start, end),
+                            frame_deallocator,
+                        ) {
+                            let frame = entry.frame().unwrap();
+                            entry.set_unused();
+                            frame_deallocator.deallocate_frame(frame);
+                        }
+                    }
+                }
+            }
+
+            page_table.iter().all(PageTableEntry::is_unused)
+        }
+
+        clean_up(
+            self.level_4_table,
+            &self.page_table_walker,
+            PageTableLevel::Four,
+            range,
+            frame_deallocator,
+        );
+    }
+}
+
 #[derive(Debug)]
 struct PageTableWalker<P: PageTableFrameMapping> {
     page_table_frame_mapping: P,
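The `clean_up_addr_range` implementation above walks the tables depth-first: for each present entry it computes the virtual-address subrange covered by that entry, recurses into the child table restricted to that subrange, and deallocates the child frame once the recursion reports the table empty. The per-entry range arithmetic, modeled with plain integers (an illustrative sketch with a hypothetical `entry_range` helper, not crate code):

```rust
/// Returns the inclusive virtual-address range covered by entry `i` of a
/// page table at `level`, mirroring `PageTableLevel::entry_address_space_alignment`.
fn entry_range(table_addr: u64, level: u8, i: u64) -> (u64, u64) {
    // 4 KiB pages, times 512 for each level below this one.
    let offset_per_entry = 1u64 << (((level - 1) * 9) + 12);
    let start = table_addr + offset_per_entry * i;
    (start, start + offset_per_entry - 1)
}

fn main() {
    // Entry 1 of a level-3 table covers the second gigabyte.
    assert_eq!(entry_range(0, 3, 1), (0x4000_0000, 0x7fff_ffff));
    // Entry 0 of a level-1 table covers a single 4 KiB page.
    assert_eq!(entry_range(0, 1, 0), (0, 0xfff));
}
```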
diff --git a/src/structures/paging/mapper/mod.rs b/src/structures/paging/mapper/mod.rs
index 4bfef61da..5d205bdb2 100644
--- a/src/structures/paging/mapper/mod.rs
+++ b/src/structures/paging/mapper/mod.rs
@@ -7,8 +7,10 @@ pub use self::offset_page_table::OffsetPageTable;
 pub use self::recursive_page_table::{InvalidPageTable, RecursivePageTable};
 
 use crate::structures::paging::{
-    frame_alloc::FrameAllocator, page_table::PageTableFlags, Page, PageSize, PhysFrame, Size1GiB,
-    Size2MiB, Size4KiB,
+    frame_alloc::{FrameAllocator, FrameDeallocator},
+    page::PageRangeInclusive,
+    page_table::PageTableFlags,
+    Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
 };
 use crate::{PhysAddr, VirtAddr};
 
@@ -480,3 +482,45 @@ pub enum TranslateError {
 }
 
 static _ASSERT_OBJECT_SAFE: Option<&(dyn Translate + Sync)> = None;
+
+/// Provides methods for cleaning up unused entries.
+pub trait CleanUp {
+    /// Remove all empty P1-P3 tables
+    ///
+    /// ## Safety
+    ///
+    /// The caller has to guarantee that it's safe to free page table frames:
+    /// All page table frames must only be used once and only in this page table
+    /// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>;
+
+    /// Remove all empty P1-P3 tables in a certain range
+    /// ```
+    /// # use core::ops::RangeInclusive;
+    /// # use x86_64::{VirtAddr, structures::paging::{
+    /// #    FrameDeallocator, Size4KiB, MappedPageTable, mapper::{RecursivePageTable, CleanUp}, page::{Page, PageRangeInclusive},
+    /// # }};
+    /// # unsafe fn test(page_table: &mut RecursivePageTable, frame_deallocator: &mut impl FrameDeallocator<Size4KiB>) {
+    /// // clean up all page tables in the lower half of the address space
+    /// let lower_half = Page::range_inclusive(
+    ///     Page::containing_address(VirtAddr::new(0)),
+    ///     Page::containing_address(VirtAddr::new(0x0000_7fff_ffff_ffff)),
+    /// );
+    /// page_table.clean_up_addr_range(lower_half, frame_deallocator);
+    /// # }
+    /// ```
+    ///
+    /// ## Safety
+    ///
+    /// The caller has to guarantee that it's safe to free page table frames:
+    /// All page table frames must only be used once and only in this page table
+    /// (e.g. no reference counted page tables or reusing the same page tables for different virtual address ranges in the same page table).
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>;
+}
diff --git a/src/structures/paging/mapper/offset_page_table.rs b/src/structures/paging/mapper/offset_page_table.rs
index 6cfaf42f6..b7f04f6e4 100644
--- a/src/structures/paging/mapper/offset_page_table.rs
+++ b/src/structures/paging/mapper/offset_page_table.rs
@@ -1,7 +1,8 @@
 #![cfg(target_pointer_width = "64")]
 
 use crate::structures::paging::{
-    frame::PhysFrame, mapper::*, page_table::PageTable, Page, PageTableFlags,
+    frame::PhysFrame, mapper::*, page::PageRangeInclusive, page_table::PageTable, FrameDeallocator,
+    Page, PageTableFlags,
 };
 
 /// A Mapper implementation that requires that the complete physically memory is mapped at some
@@ -264,3 +265,24 @@ impl<'a> Translate for OffsetPageTable<'a> {
         self.inner.translate(addr)
     }
 }
+
+impl<'a> CleanUp for OffsetPageTable<'a> {
+    #[inline]
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        self.inner.clean_up(frame_deallocator)
+    }
+
+    #[inline]
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        self.inner.clean_up_addr_range(range, frame_deallocator)
+    }
+}
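Both trait methods are generic over `FrameDeallocator<Size4KiB>`, which receives every page-table frame that becomes unreferenced. A toy implementation showing the shape of that bound (illustrative only; a real kernel would hand the frame back to its physical-memory allocator):

```rust
use x86_64::structures::paging::{FrameDeallocator, PhysFrame, Size4KiB};
use x86_64::PhysAddr;

/// Counts frees instead of recycling frames.
struct CountingDeallocator {
    freed: usize,
}

impl FrameDeallocator<Size4KiB> for CountingDeallocator {
    unsafe fn deallocate_frame(&mut self, _frame: PhysFrame<Size4KiB>) {
        // `clean_up`'s safety contract guarantees the frame is no longer used.
        self.freed += 1;
    }
}

fn main() {
    let mut dealloc = CountingDeallocator { freed: 0 };
    let frame = PhysFrame::containing_address(PhysAddr::new(0x5000));
    // `clean_up` invokes this once per page-table frame it removes.
    unsafe { dealloc.deallocate_frame(frame) };
    assert_eq!(dealloc.freed, 1);
}
```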
diff --git a/src/structures/paging/mapper/recursive_page_table.rs b/src/structures/paging/mapper/recursive_page_table.rs
index b66aec176..1ae8d2b26 100644
--- a/src/structures/paging/mapper/recursive_page_table.rs
+++ b/src/structures/paging/mapper/recursive_page_table.rs
@@ -4,12 +4,12 @@ use core::fmt;
 
 use super::*;
 use crate::registers::control::Cr3;
-use crate::structures::paging::PageTableIndex;
+use crate::structures::paging::page_table::PageTableLevel;
 use crate::structures::paging::{
     frame_alloc::FrameAllocator,
-    page::{AddressNotAligned, NotGiantPageSize},
+    page::{AddressNotAligned, NotGiantPageSize, PageRangeInclusive},
     page_table::{FrameError, PageTable, PageTableEntry, PageTableFlags},
-    Page, PageSize, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
+    FrameDeallocator, Page, PageSize, PageTableIndex, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
 };
 use crate::VirtAddr;
 
@@ -829,6 +829,97 @@ impl<'a> Translate for RecursivePageTable<'a> {
     }
 }
 
+impl<'a> CleanUp for RecursivePageTable<'a> {
+    #[inline]
+    unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
+    where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        self.clean_up_addr_range(
+            PageRangeInclusive {
+                start: Page::from_start_address(VirtAddr::new(0)).unwrap(),
+                end: Page::from_start_address(VirtAddr::new(0xffff_ffff_ffff_f000)).unwrap(),
+            },
+            frame_deallocator,
+        )
+    }
+
+    unsafe fn clean_up_addr_range<D>(
+        &mut self,
+        range: PageRangeInclusive,
+        frame_deallocator: &mut D,
+    ) where
+        D: FrameDeallocator<Size4KiB>,
+    {
+        fn clean_up(
+            recursive_index: PageTableIndex,
+            page_table: &mut PageTable,
+            level: PageTableLevel,
+            range: PageRangeInclusive,
+            frame_deallocator: &mut impl FrameDeallocator<Size4KiB>,
+        ) -> bool {
+            if range.is_empty() {
+                return false;
+            }
+
+            let table_addr = range
+                .start
+                .start_address()
+                .align_down(level.table_address_space_alignment());
+
+            let start = range.start.page_table_index(level);
+            let end = range.end.page_table_index(level);
+
+            if let Some(next_level) = level.next_lower_level() {
+                let offset_per_entry = level.entry_address_space_alignment();
+                for (i, entry) in page_table
+                    .iter_mut()
+                    .enumerate()
+                    .take(usize::from(end) + 1)
+                    .skip(usize::from(start))
+                    .filter(|(i, _)| {
+                        !(level == PageTableLevel::Four && *i == recursive_index.into())
+                    })
+                {
+                    if let Ok(frame) = entry.frame() {
+                        let start = table_addr + (offset_per_entry * (i as u64));
+                        let end = start + (offset_per_entry - 1);
+                        let start = Page::<Size4KiB>::containing_address(start);
+                        let start = start.max(range.start);
+                        let end = Page::<Size4KiB>::containing_address(end);
+                        let end = end.min(range.end);
+                        let page_table =
+                            [p1_ptr, p2_ptr, p3_ptr][level as usize - 2](start, recursive_index);
+                        let page_table = unsafe { &mut *page_table };
+                        if clean_up(
+                            recursive_index,
+                            page_table,
+                            next_level,
+                            Page::range_inclusive(start, end),
+                            frame_deallocator,
+                        ) {
+                            entry.set_unused();
+                            unsafe {
+                                frame_deallocator.deallocate_frame(frame);
+                            }
+                        }
+                    }
+                }
+            }
+
+            page_table.iter().all(PageTableEntry::is_unused)
+        }
+
+        clean_up(
+            self.recursive_index,
+            self.level_4_table(),
+            PageTableLevel::Four,
+            range,
+            frame_deallocator,
+        );
+    }
+}
+
 /// The given page table was not suitable to create a `RecursivePageTable`.
 #[derive(Debug)]
 pub enum InvalidPageTable {
diff --git a/src/structures/paging/page.rs b/src/structures/paging/page.rs
index 1169946a5..0e9136dd0 100644
--- a/src/structures/paging/page.rs
+++ b/src/structures/paging/page.rs
@@ -1,5 +1,6 @@
 //! Abstractions for default-sized and huge virtual memory pages.
 
+use crate::structures::paging::page_table::PageTableLevel;
 use crate::structures::paging::PageTableIndex;
 use crate::VirtAddr;
 use core::fmt;
@@ -130,6 +131,18 @@ impl<S: PageSize> Page<S> {
         }
     }
 
+    const_fn! {
+        /// Returns the page table index of this page at the given level.
+        ///
+        /// ## Panics
+        ///
+        /// Panics if level is not between 1 and 4
+        #[inline]
+        pub fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
+            self.start_address().page_table_index(level)
+        }
+    }
+
     const_fn! {
         /// Returns a range of pages, exclusive `end`.
         #[inline]
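`Page::page_table_index` simply forwards to the `VirtAddr` helper, which is what lets `clean_up_addr_range` turn a page range into a per-level entry range. A small sketch with hypothetical values (assumes a crate build with this patch applied):

```rust
use x86_64::structures::paging::page_table::PageTableLevel;
use x86_64::structures::paging::{Page, Size4KiB};
use x86_64::VirtAddr;

fn main() {
    let page: Page<Size4KiB> = Page::containing_address(VirtAddr::new(0xb8000));
    // 0xb8000 >> 12 = 0xb8 = 184, so the page sits in entry 184 of its P1 table...
    assert_eq!(usize::from(page.page_table_index(PageTableLevel::One)), 184);
    // ...and in entry 0 at every higher level.
    assert_eq!(usize::from(page.page_table_index(PageTableLevel::Two)), 0);
}
```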
diff --git a/src/structures/paging/page_table.rs b/src/structures/paging/page_table.rs
index ac83a2c52..3481b184a 100644
--- a/src/structures/paging/page_table.rs
+++ b/src/structures/paging/page_table.rs
@@ -364,3 +364,38 @@ impl From<PageOffset> for usize {
         usize::from(offset.0)
     }
 }
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+/// A value between 1 and 4.
+pub enum PageTableLevel {
+    /// Represents the level for a page table.
+    One = 1,
+    /// Represents the level for a page directory.
+    Two,
+    /// Represents the level for a page-directory pointer.
+    Three,
+    /// Represents the level for a page-map level-4.
+    Four,
+}
+
+impl PageTableLevel {
+    /// Returns the next lower level or `None` for level 1
+    pub const fn next_lower_level(self) -> Option<Self> {
+        match self {
+            PageTableLevel::Four => Some(PageTableLevel::Three),
+            PageTableLevel::Three => Some(PageTableLevel::Two),
+            PageTableLevel::Two => Some(PageTableLevel::One),
+            PageTableLevel::One => None,
+        }
+    }
+
+    /// Returns the alignment for the address space described by a table of this level.
+    pub const fn table_address_space_alignment(self) -> u64 {
+        1u64 << (self as u8 * 9 + 12)
+    }
+
+    /// Returns the alignment for the address space described by an entry in a table of this level.
+    pub const fn entry_address_space_alignment(self) -> u64 {
+        1u64 << (((self as u8 - 1) * 9) + 12)
+    }
+}
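The two alignment helpers are pure shift arithmetic: a table at level N spans 512^N 4 KiB pages, and each of its 512 entries spans 1/512 of that. A sketch of the expected values (assumes a crate build with this patch applied):

```rust
use x86_64::structures::paging::page_table::PageTableLevel;

fn main() {
    let level = PageTableLevel::Two;
    // A whole P2 table maps 512 * 2 MiB = 1 GiB of address space...
    assert_eq!(level.table_address_space_alignment(), 1 << 30);
    // ...while each P2 entry maps one 2 MiB region.
    assert_eq!(level.entry_address_space_alignment(), 1 << 21);
    assert_eq!(level.next_lower_level(), Some(PageTableLevel::One));
}
```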