From 75ad5045c147c26e9f6bac89284f32e776484149 Mon Sep 17 00:00:00 2001 From: Pawel Wieczorkiewicz Date: Wed, 15 Nov 2023 09:13:17 +0100 Subject: [PATCH 1/4] arch,pt: minor cleanup of pagetable.h header file Signed-off-by: Pawel Wieczorkiewicz --- include/arch/x86/pagetable.h | 62 ++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/include/arch/x86/pagetable.h b/include/arch/x86/pagetable.h index eceae80b..a0405b5e 100644 --- a/include/arch/x86/pagetable.h +++ b/include/arch/x86/pagetable.h @@ -136,11 +136,39 @@ union cr3 { }; typedef union cr3 cr3_t; -extern cr3_t cr3; -extern cr3_t user_cr3; - typedef unsigned int pt_index_t; +/* External declarations */ + +extern cr3_t cr3, user_cr3; + +extern pte_t l1_pt_entries1[L1_PT_ENTRIES]; +extern pte_t l1_pt_entries2[L1_PT_ENTRIES]; +extern pte_t l1_pt_entries3[L1_PT_ENTRIES]; +extern pde_t l2_pt_entries[L2_PT_ENTRIES]; +extern pdpe_t l3_pt_entries[L3_PT_ENTRIES]; +#if !defined(__i386__) +extern pml4_t l4_pt_entries[L4_PT_ENTRIES]; +#endif + +extern void init_pagetables(void); +extern void dump_pagetables(cr3_t *cr3_ptr); +extern void dump_kern_pagetable_va(void *va); +extern void dump_user_pagetable_va(void *va); + +extern int get_kern_va_mfn_order(void *va, mfn_t *mfn, unsigned int *order); +extern int get_user_va_mfn_order(void *va, mfn_t *mfn, unsigned int *order); + +extern frame_t *find_kern_va_frame(const void *va); +extern frame_t *find_user_va_frame(const void *va); + +extern void map_pagetables(cr3_t *to_cr3, cr3_t *from_cr3); +extern void unmap_pagetables(cr3_t *from_cr3, cr3_t *of_cr3); +extern int map_pagetables_va(cr3_t *cr3_ptr, void *va); +extern int unmap_pagetables_va(cr3_t *cr3_ptr, void *va); + +/* Static declarations */ + static inline pt_index_t l1_table_index(const void *va) { return (_ul(va) >> L1_PT_SHIFT) & (L1_PT_ENTRIES - 1); } @@ -292,34 +320,6 @@ static inline bool is_pgentry_present(pgentry_t e) { return !!(e & _PAGE_PRESENT); } -/* External declarations */ - -extern pte_t l1_pt_entries1[L1_PT_ENTRIES]; -extern pte_t l1_pt_entries2[L1_PT_ENTRIES]; -extern pte_t l1_pt_entries3[L1_PT_ENTRIES]; -extern pde_t l2_pt_entries[L2_PT_ENTRIES]; -extern pdpe_t l3_pt_entries[L3_PT_ENTRIES]; -#if defined(__x86_64__) -extern pml4_t l4_pt_entries[L4_PT_ENTRIES]; -#elif defined(__i386__) -#endif - -extern void init_pagetables(void); -extern void dump_pagetables(cr3_t *cr3_ptr); -extern void dump_kern_pagetable_va(void *va); -extern void dump_user_pagetable_va(void *va); - -extern int get_kern_va_mfn_order(void *va, mfn_t *mfn, unsigned int *order); -extern int get_user_va_mfn_order(void *va, mfn_t *mfn, unsigned int *order); - -extern frame_t *find_kern_va_frame(const void *va); -extern frame_t *find_user_va_frame(const void *va); - -extern void map_pagetables(cr3_t *to_cr3, cr3_t *from_cr3); -extern void unmap_pagetables(cr3_t *from_cr3, cr3_t *of_cr3); -extern int map_pagetables_va(cr3_t *cr3_ptr, void *va); -extern int unmap_pagetables_va(cr3_t *cr3_ptr, void *va); - #endif /* __ASSEMBLY__ */ #endif /* KTF_PAGETABLE_H */ From 37950a59ce0dc853511928f3abb8fa70fcdec7e5 Mon Sep 17 00:00:00 2001 From: Pawel Wieczorkiewicz Date: Fri, 17 Nov 2023 13:49:43 +0100 Subject: [PATCH 2/4] arch,pt: unify and standardize vmap API interface Summary of the changes: * Get rid of kmap() API - it was confusing and non-standard * Add set of generic vmap APIs and make them public - These functions allow specifying all parameters * All level PT flags * Address space (via CR3 pointer) * Add set of
order-specific (4K, 2M, 1G) public vmap APIs - Use default PT flags for non-last levels - Allow propagating the user bit on request * Add separate sets of APIs for kernel and user address spaces * Move all interface definitions from page.h to pagetable.h Signed-off-by: Pawel Wieczorkiewicz --- arch/x86/apic.c | 3 +- arch/x86/boot/multiboot.c | 5 +- arch/x86/ioapic.c | 6 +- arch/x86/pagetables.c | 117 ++++++++++++++++++---- common/setup.c | 11 +- drivers/acpi/acpica/osl.c | 3 +- drivers/fb/fb.c | 6 +- drivers/hpet.c | 3 +- drivers/vga.c | 6 +- include/arch/x86/page.h | 77 -------------- include/arch/x86/pagetable.h | 57 +++++++++++ mm/pmm.c | 12 ++- mm/vmm.c | 5 +- smp/mptables.c | 5 +- tests/test_cond_branch_mispredictions.c | 3 +- tests/test_uncond_branch_mispredictions.c | 3 +- tests/unittests.c | 7 +- 17 files changed, 202 insertions(+), 127 deletions(-) diff --git a/arch/x86/apic.c b/arch/x86/apic.c index 8bd9e8f9..17fa946b 100644 --- a/arch/x86/apic.c +++ b/arch/x86/apic.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -160,7 +161,7 @@ void init_apic(unsigned int cpu_id, apic_mode_t mode) { * X2APIC uses MSRs for accesses, so no mapping needed. */ if (apic_mode == APIC_MODE_XAPIC) - vmap_4k(apic_get_base(apic_base), apic_base.base, L1_PROT_NOCACHE); + vmap_kern_4k(apic_get_base(apic_base), apic_base.base, L1_PROT_NOCACHE); spiv.reg = apic_read(APIC_SPIV); spiv.vector = APIC_SPI_VECTOR; diff --git a/arch/x86/boot/multiboot.c b/arch/x86/boot/multiboot.c index 2a727d78..4a7e4426 100644 --- a/arch/x86/boot/multiboot.c +++ b/arch/x86/boot/multiboot.c @@ -29,6 +29,7 @@ #include #include #include +#include #define TAG_ADDR(tag) \ ((multiboot2_tag_t *) ((multiboot2_uint8_t *) (tag) + (((tag)->size + 7) & ~7))) @@ -186,8 +187,8 @@ void map_multiboot_areas(void) { paddr_t mbi_stop = mbi_start + multiboot2_hdr_size; for (mfn_t mfn = paddr_to_mfn(mbi_start); mfn <= paddr_to_mfn(mbi_stop); mfn++) { - vmap_4k(mfn_to_virt(mfn), mfn, L1_PROT_RO); - kmap_4k(mfn, L1_PROT_RO); + vmap_kern_4k(mfn_to_virt(mfn), mfn, L1_PROT_RO); + vmap_kern_4k(mfn_to_virt_kern(mfn), mfn, L1_PROT_RO); } } diff --git a/arch/x86/ioapic.c b/arch/x86/ioapic.c index 85d3cb18..2cff3bbd 100644 --- a/arch/x86/ioapic.c +++ b/arch/x86/ioapic.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #define IOAPIC_SYSTEM_ISA_BUS_NAME "ISA" @@ -217,8 +218,9 @@ ioapic_t *add_ioapic(uint8_t id, uint8_t version, bool enabled, uint64_t base_ad ioapic->base_address = base_address; ioapic->gsi_base = gsi_base; - ioapic->virt_address = vmap_4k(paddr_to_virt(ioapic->base_address), - paddr_to_mfn(ioapic->base_address), L1_PROT_NOCACHE); + ioapic->virt_address = + vmap_kern_4k(paddr_to_virt(ioapic->base_address), + paddr_to_mfn(ioapic->base_address), L1_PROT_NOCACHE); BUG_ON(!ioapic->virt_address); return ioapic; diff --git a/arch/x86/pagetables.c b/arch/x86/pagetables.c index 755b0b82..1fe57971 100644 --- a/arch/x86/pagetables.c +++ b/arch/x86/pagetables.c @@ -308,31 +308,112 @@ static void *_vmap(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned int order, return va; } -void *vmap_kern(void *va, mfn_t mfn, unsigned int order, +static inline void *__vmap_1g(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l4_flags, + unsigned long l3_flags) { + return _vmap(cr3_ptr, va, mfn, PAGE_ORDER_1G, l4_flags, l3_flags | _PAGE_PSE, + PT_NO_FLAGS, PT_NO_FLAGS); +} + +static inline void *__vmap_2m(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l4_flags, + unsigned long l3_flags, unsigned long l2_flags) { + return
_vmap(cr3_ptr, va, mfn, PAGE_ORDER_2M, l4_flags, l3_flags, + l2_flags | _PAGE_PSE, PT_NO_FLAGS); +} + +static inline void *__vmap_4k(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l4_flags, + unsigned long l3_flags, unsigned long l2_flags, + unsigned long l1_flags) { + return _vmap(cr3_ptr, va, mfn, PAGE_ORDER_4K, l4_flags, l3_flags, l2_flags, l1_flags); +} + +static inline void *_vmap_1g(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l3_flags, + bool propagate_user) { + unsigned long l4_flags = L4_PROT; + + if (propagate_user) + l4_flags |= l3_flags & _PAGE_USER; + return __vmap_1g(cr3_ptr, va, mfn, l4_flags, l3_flags); +} + +static inline void *_vmap_2m(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l2_flags, + bool propagate_user) { + unsigned long l4_flags = L4_PROT; + unsigned long l3_flags = L3_PROT; + + if (propagate_user) { + unsigned long user_bit = l2_flags & _PAGE_USER; + l4_flags |= user_bit; + l3_flags |= user_bit; + } + return __vmap_2m(cr3_ptr, va, mfn, l4_flags, l3_flags, l2_flags); +} + +static inline void *_vmap_4k(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l1_flags, + bool propagate_user) { + unsigned long l4_flags = L4_PROT; + unsigned long l3_flags = L3_PROT; + unsigned long l2_flags = L2_PROT; + + if (propagate_user) { + unsigned long user_bit = l1_flags & _PAGE_USER; + l4_flags |= user_bit; + l3_flags |= user_bit; + l2_flags |= user_bit; + } + + return __vmap_4k(cr3_ptr, va, mfn, l4_flags, l3_flags, l2_flags, l1_flags); +} + +void *vmap(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned int order, #if defined(__x86_64__) - unsigned long l4_flags, + unsigned long l4_flags, #endif - unsigned long l3_flags, unsigned long l2_flags, unsigned long l1_flags) { - unsigned long _va = _ul(va) & PAGE_ORDER_TO_MASK(order); - + unsigned long l3_flags, unsigned long l2_flags, unsigned long l1_flags) { dprintk("%s: va: 0x%p mfn: 0x%lx (order: %u)\n", __func__, va, mfn, order); + + spin_lock(&vmap_lock); + va = _vmap(cr3_ptr, va, mfn, order, l4_flags, l3_flags, l2_flags, l1_flags); + spin_unlock(&vmap_lock); + + return va; +} + +void *vmap_1g(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l3_flags, + bool propagate_user) { + unsigned long _va = _ul(va) & PAGE_ORDER_TO_MASK(PAGE_ORDER_1G); + + dprintk("%s: va: 0x%p mfn: 0x%lx\n", __func__, va, mfn); + spin_lock(&vmap_lock); - va = _vmap(&cr3, _ptr(_va), mfn, order, l4_flags, l3_flags, l2_flags, l1_flags); + va = _vmap_1g(cr3_ptr, _ptr(_va), mfn, l3_flags, propagate_user); spin_unlock(&vmap_lock); + return va; } -void *vmap_user(void *va, mfn_t mfn, unsigned int order, -#if defined(__x86_64__) - unsigned long l4_flags, -#endif - unsigned long l3_flags, unsigned long l2_flags, unsigned long l1_flags) { - unsigned long _va = _ul(va) & PAGE_ORDER_TO_MASK(order); +void *vmap_2m(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l2_flags, + bool propagate_user) { + unsigned long _va = _ul(va) & PAGE_ORDER_TO_MASK(PAGE_ORDER_2M); + + dprintk("%s: va: 0x%p mfn: 0x%lx\n", __func__, va, mfn); + + spin_lock(&vmap_lock); + va = _vmap_2m(cr3_ptr, _ptr(_va), mfn, l2_flags, propagate_user); + spin_unlock(&vmap_lock); + + return va; +} + +void *vmap_4k(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l1_flags, + bool propagate_user) { + unsigned long _va = _ul(va) & PAGE_ORDER_TO_MASK(PAGE_ORDER_4K); + + dprintk("%s: va: 0x%p mfn: 0x%lx\n", __func__, va, mfn); - dprintk("%s: va: 0x%p mfn: 0x%lx (order: %u)\n", __func__, va, mfn, order); spin_lock(&vmap_lock); - va = _vmap(&user_cr3, _ptr(_va), mfn, order, l4_flags, l3_flags, 
l2_flags, l1_flags); + va = _vmap_4k(cr3_ptr, _ptr(_va), mfn, l1_flags, propagate_user); spin_unlock(&vmap_lock); + return va; } @@ -349,7 +430,7 @@ static void map_tmp_mapping_entry(void) { pte_t *entry = l1_table_entry(mfn_to_virt(l1e->mfn), _tmp_mapping); /* Map _tmp_mapping_entry PTE of new page tables */ - kmap_4k(l1e->mfn, L1_PROT); + vmap_kern_4k(mfn_to_virt_kern(l1e->mfn), l1e->mfn, L1_PROT); /* Point _tmp_mapping_entry at new page tables location */ _tmp_mapping_entry = paddr_to_virt_kern(_paddr(entry)); @@ -789,17 +870,17 @@ void init_pagetables(void) { switch (r->base) { case VIRT_IDENT_BASE: for (mfn_t mfn = virt_to_mfn(r->start); mfn < virt_to_mfn(r->end); mfn++) - vmap_4k(mfn_to_virt(mfn), mfn, r->flags); + vmap_kern_4k(mfn_to_virt(mfn), mfn, r->flags); break; case VIRT_KERNEL_BASE: for (mfn_t mfn = virt_to_mfn(r->start); mfn < virt_to_mfn(r->end); mfn++) - kmap_4k(mfn, r->flags); + vmap_kern_4k(mfn_to_virt_kern(mfn), mfn, r->flags); break; case VIRT_USER_BASE: for (mfn_t mfn = virt_to_mfn(r->start); mfn < virt_to_mfn(r->end); mfn++) { void *va = mfn_to_virt_user(mfn); - vmap_4k(va, mfn, r->flags); + vmap_kern_4k(va, mfn, r->flags); vmap_user_4k(va, mfn, r->flags); } break; diff --git a/common/setup.c b/common/setup.c index 7f8d2341..9bddd000 100644 --- a/common/setup.c +++ b/common/setup.c @@ -136,16 +136,17 @@ void zap_boot_mappings(void) { } static void __text_init map_bios_area(void) { - vmap_4k(paddr_to_virt(BDA_ADDR_START), paddr_to_mfn(BDA_ADDR_START), L1_PROT_RO); - kmap_4k(paddr_to_mfn(BDA_ADDR_START), L1_PROT_RO); + vmap_kern_4k(paddr_to_virt(BDA_ADDR_START), paddr_to_mfn(BDA_ADDR_START), L1_PROT_RO); + vmap_kern_4k(paddr_to_virt_kern(BDA_ADDR_START), paddr_to_mfn(BDA_ADDR_START), + L1_PROT_RO); uint32_t ebda_addr = get_bios_ebda_addr(); - vmap_4k(paddr_to_virt(ebda_addr), paddr_to_mfn(ebda_addr), L1_PROT_RO); - kmap_4k(paddr_to_mfn(ebda_addr), L1_PROT_RO); + vmap_kern_4k(paddr_to_virt(ebda_addr), paddr_to_mfn(ebda_addr), L1_PROT_RO); + vmap_kern_4k(paddr_to_virt_kern(ebda_addr), paddr_to_mfn(ebda_addr), L1_PROT_RO); for (mfn_t bios_mfn = paddr_to_mfn(BIOS_ACPI_ROM_START); bios_mfn < paddr_to_mfn(BIOS_ACPI_ROM_STOP); bios_mfn++) - kmap_4k(bios_mfn, L1_PROT_RO); + vmap_kern_4k(mfn_to_virt_kern(bios_mfn), bios_mfn, L1_PROT_RO); } static void display_cpu_info(void) { diff --git a/drivers/acpi/acpica/osl.c b/drivers/acpi/acpica/osl.c index d4fbd46d..e1862cf7 100644 --- a/drivers/acpi/acpica/osl.c +++ b/drivers/acpi/acpica/osl.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -306,7 +307,7 @@ void *AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length) { void *_va; if (!frame) { - _va = vmap_4k(mfn_to_virt_map(mfn), mfn, L1_PROT); + _va = vmap_kern_4k(mfn_to_virt_map(mfn), mfn, L1_PROT); if (!_va) { spin_unlock(&map_lock); return NULL; diff --git a/drivers/fb/fb.c b/drivers/fb/fb.c index 8912adb3..c2e8c58d 100644 --- a/drivers/fb/fb.c +++ b/drivers/fb/fb.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include extern uint64_t fonts[]; @@ -56,8 +56,8 @@ static void (*put_pixel)(uint32_t x, uint32_t y, uint32_t color); static void map_fb_area(paddr_t start, size_t size) { for (mfn_t video_mfn = paddr_to_mfn(start); video_mfn < paddr_to_mfn(start + size); video_mfn++) { - vmap_4k(mfn_to_virt(video_mfn), video_mfn, L1_PROT_NOCACHE); - kmap_4k(video_mfn, L1_PROT_NOCACHE); + vmap_kern_4k(mfn_to_virt(video_mfn), video_mfn, L1_PROT_NOCACHE); + vmap_kern_4k(mfn_to_virt_kern(video_mfn), video_mfn, 
L1_PROT_NOCACHE); } } diff --git a/drivers/hpet.c b/drivers/hpet.c index 6333d15a..448358b9 100644 --- a/drivers/hpet.c +++ b/drivers/hpet.c @@ -26,6 +26,7 @@ #include #include #include +#include bool init_hpet(const cpu_t *cpu) { #ifndef KTF_ACPICA @@ -59,7 +60,7 @@ bool init_hpet(const cpu_t *cpu) { #endif hpet_base_mfn = paddr_to_mfn(address); - vmap_4k(_ptr(address), hpet_base_mfn, L1_PROT_NOCACHE); + vmap_kern_4k(_ptr(address), hpet_base_mfn, L1_PROT_NOCACHE); config = (acpi_hpet_timer_t *) (address + HPET_OFFSET_TIMER_0_CONFIG_CAP_REG); general = (acpi_hpet_general_t *) (address + HPET_OFFSET_GENERAL_CAP_REG); main_counter = (uint64_t *) (address + HPET_OFFSET_GENERAL_MAIN_COUNTER_REG); diff --git a/drivers/vga.c b/drivers/vga.c index 12c54092..d679bf73 100644 --- a/drivers/vga.c +++ b/drivers/vga.c @@ -24,7 +24,7 @@ */ #include #include -#include +#include #include #define MAX_ROWS VGA_ROWS @@ -83,7 +83,7 @@ void vga_write(void *vga_memory, const char *buf, size_t len, vga_color_t color) void map_vga_area(void) { for (mfn_t vga_mfn = paddr_to_mfn(VGA_START_ADDR); vga_mfn < paddr_to_mfn(VGA_END_ADDR); vga_mfn++) { - vmap_4k(mfn_to_virt(vga_mfn), vga_mfn, L1_PROT_NOCACHE); - kmap_4k(vga_mfn, L1_PROT_NOCACHE); + vmap_kern_4k(mfn_to_virt(vga_mfn), vga_mfn, L1_PROT_NOCACHE); + vmap_kern_4k(mfn_to_virt_kern(vga_mfn), vga_mfn, L1_PROT_NOCACHE); } } diff --git a/include/arch/x86/page.h b/include/arch/x86/page.h index 7b3b8f30..13194646 100644 --- a/include/arch/x86/page.h +++ b/include/arch/x86/page.h @@ -175,22 +175,6 @@ typedef unsigned long mfn_t; /* External declarations */ -extern void *vmap_kern(void *va, mfn_t mfn, unsigned int order, -#if defined(__x86_64__) - unsigned long l4_flags, -#endif - unsigned long l3_flags, unsigned long l2_flags, - unsigned long l1_flags); - -extern void *vmap_user(void *va, mfn_t mfn, unsigned int order, -#if defined(__x86_64__) - unsigned long l4_flags, -#endif - unsigned long l3_flags, unsigned long l2_flags, - unsigned long l1_flags); - -extern int vunmap_kern(void *va, mfn_t *mfn, unsigned int *order); -extern int vunmap_user(void *va, mfn_t *mfn, unsigned int *order); extern void pat_set_type(pat_field_t field, pat_memory_type_t type); extern pat_memory_type_t pat_get_type(pat_field_t field); @@ -257,67 +241,6 @@ static inline mfn_t virt_to_mfn(const void *va) { return paddr_to_mfn(virt_to_paddr(va)); } -static inline void *kmap(mfn_t mfn, unsigned int order, -#if defined(__x86_64__) - unsigned long l4_flags, -#endif - unsigned long l3_flags, unsigned long l2_flags, - unsigned long l1_flags) { - return vmap_kern(mfn_to_virt_kern(mfn), mfn, order, -#if defined(__x86_64__) - l4_flags, -#endif - l3_flags, l2_flags, l1_flags); -} - -static inline void *vmap_1g(void *va, mfn_t mfn, unsigned long l3_flags) { - return vmap_kern(va, mfn, PAGE_ORDER_1G, L4_PROT, l3_flags | _PAGE_PSE, PT_NO_FLAGS, - PT_NO_FLAGS); -} - -static inline void *vmap_2m(void *va, mfn_t mfn, unsigned long l2_flags) { - return vmap_kern(va, mfn, PAGE_ORDER_2M, L4_PROT, L3_PROT, l2_flags | _PAGE_PSE, - PT_NO_FLAGS); -} - -static inline void *vmap_4k(void *va, mfn_t mfn, unsigned long l1_flags) { - return vmap_kern(va, mfn, PAGE_ORDER_4K, L4_PROT, L3_PROT, L2_PROT, l1_flags); -} - -static inline void *kmap_1g(mfn_t mfn, unsigned long l3_flags) { - return kmap(mfn, PAGE_ORDER_1G, L4_PROT, l3_flags | _PAGE_PSE, PT_NO_FLAGS, - PT_NO_FLAGS); -} - -static inline void *kmap_2m(mfn_t mfn, unsigned long l2_flags) { - return kmap(mfn, PAGE_ORDER_2M, L4_PROT, L3_PROT, l2_flags | _PAGE_PSE, 
PT_NO_FLAGS); -} - -static inline void *kmap_4k(mfn_t mfn, unsigned long l1_flags) { - return kmap(mfn, PAGE_ORDER_4K, L4_PROT, L3_PROT, L2_PROT, l1_flags); -} - -static inline void *vmap_user_1g(void *va, mfn_t mfn, unsigned long l3_flags) { - unsigned long user = l3_flags & _PAGE_USER; - - return vmap_user(va, mfn, PAGE_ORDER_1G, L4_PROT | user, - l3_flags | (user | _PAGE_PSE), PT_NO_FLAGS, PT_NO_FLAGS); -} - -static inline void *vmap_user_2m(void *va, mfn_t mfn, unsigned long l2_flags) { - unsigned long user = l2_flags & _PAGE_USER; - - return vmap_user(va, mfn, PAGE_ORDER_2M, L4_PROT | user, L3_PROT | user, - l2_flags | (user | _PAGE_PSE), PT_NO_FLAGS); -} - -static inline void *vmap_user_4k(void *va, mfn_t mfn, unsigned long l1_flags) { - unsigned long user = l1_flags & _PAGE_USER; - - return vmap_user(va, mfn, PAGE_ORDER_4K, L4_PROT | user, L3_PROT | user, - L2_PROT | user, l1_flags); -} - #endif /* __ASSEMBLY__ */ #endif /* KTF_PAGE_H */ diff --git a/include/arch/x86/pagetable.h b/include/arch/x86/pagetable.h index a0405b5e..a404000f 100644 --- a/include/arch/x86/pagetable.h +++ b/include/arch/x86/pagetable.h @@ -167,6 +167,21 @@ extern void unmap_pagetables(cr3_t *from_cr3, cr3_t *of_cr3); extern int map_pagetables_va(cr3_t *cr3_ptr, void *va); extern int unmap_pagetables_va(cr3_t *cr3_ptr, void *va); +extern void *vmap(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned int order, +#if defined(__x86_64__) + unsigned long l4_flags, +#endif + unsigned long l3_flags, unsigned long l2_flags, unsigned long l1_flags); +extern void *vmap_1g(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l3_flags, + bool propagate_user); +extern void *vmap_2m(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l2_flags, + bool propagate_user); +extern void *vmap_4k(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l1_flags, + bool propagate_user); + +extern int vunmap_kern(void *va, mfn_t *mfn, unsigned int *order); +extern int vunmap_user(void *va, mfn_t *mfn, unsigned int *order); + /* Static declarations */ static inline pt_index_t l1_table_index(const void *va) { @@ -320,6 +335,48 @@ static inline bool is_pgentry_present(pgentry_t e) { return !!(e & _PAGE_PRESENT); } +static inline void *vmap_kern(void *va, mfn_t mfn, unsigned int order, +#if defined(__x86_64__) + unsigned long l4_flags, +#endif + unsigned long l3_flags, unsigned long l2_flags, + unsigned long l1_flags) { + return vmap(&cr3, va, mfn, order, l4_flags, l3_flags, l2_flags, l1_flags); +} + +static inline void *vmap_kern_1g(void *va, mfn_t mfn, unsigned long l3_flags) { + return vmap_1g(&cr3, va, mfn, l3_flags, false); +} + +static inline void *vmap_kern_2m(void *va, mfn_t mfn, unsigned long l2_flags) { + return vmap_2m(&cr3, va, mfn, l2_flags, false); +} + +static inline void *vmap_kern_4k(void *va, mfn_t mfn, unsigned long l1_flags) { + return vmap_4k(&cr3, va, mfn, l1_flags, false); +} + +static inline void *vmap_user(void *va, mfn_t mfn, unsigned int order, +#if defined(__x86_64__) + unsigned long l4_flags, +#endif + unsigned long l3_flags, unsigned long l2_flags, + unsigned long l1_flags) { + return vmap(&user_cr3, va, mfn, order, l4_flags, l3_flags, l2_flags, l1_flags); +} + +static inline void *vmap_user_1g(void *va, mfn_t mfn, unsigned long l3_flags) { + return vmap_1g(&user_cr3, va, mfn, l3_flags, true); +} + +static inline void *vmap_user_2m(void *va, mfn_t mfn, unsigned long l2_flags) { + return vmap_2m(&user_cr3, va, mfn, l2_flags, true); +} + +static inline void *vmap_user_4k(void *va, mfn_t mfn, unsigned long l1_flags) { + 
return vmap_4k(&user_cr3, va, mfn, l1_flags, true); +} + #endif /* __ASSEMBLY__ */ #endif /* KTF_PAGETABLE_H */ diff --git a/mm/pmm.c b/mm/pmm.c index 6622f805..089d9101 100644 --- a/mm/pmm.c +++ b/mm/pmm.c @@ -23,12 +23,12 @@ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include -#include -#include - #include #include #include +#include +#include +#include size_t total_phys_memory; @@ -641,5 +641,9 @@ void put_free_frames(mfn_t mfn, unsigned int order) { void map_frames_array(void) { frames_array_t *array; - list_for_each_entry (array, &frames, list) { kmap_4k(virt_to_mfn(array), L1_PROT); } + list_for_each_entry (array, &frames, list) { + mfn_t mfn = virt_to_mfn(array); + + BUG_ON(!vmap_kern_4k(mfn_to_virt_kern(mfn), mfn, L1_PROT)); + } } diff --git a/mm/vmm.c b/mm/vmm.c index ba7bc65b..193f63b1 100644 --- a/mm/vmm.c +++ b/mm/vmm.c @@ -24,7 +24,7 @@ */ #include #include -#include +#include #include #include @@ -61,7 +61,8 @@ void *get_free_pages(unsigned int order, gfp_flags_t flags) { } if (flags & GFP_KERNEL) { - va = kmap(mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT); + va = vmap_kern(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, + L1_PROT); if (flags & GFP_USER) vmap_user(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT); diff --git a/smp/mptables.c b/smp/mptables.c index e3c8baf3..fe5f6f26 100644 --- a/smp/mptables.c +++ b/smp/mptables.c @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include @@ -88,7 +88,8 @@ static mpf_t *get_mpf_addr(void) { if (get_memory_range(KB(512), &range) < 0) return NULL; - sysm_addr = kmap_4k(paddr_to_mfn(_paddr(range.end) - KB(1)), L1_PROT_RO); + mfn_t mfn = paddr_to_mfn(_paddr(range.end) - KB(1)); + sysm_addr = vmap_kern_4k(mfn_to_virt_kern(mfn), mfn, L1_PROT_RO); if (sysm_addr) { ptr = find_mpf(sysm_addr, sysm_addr + KB(1)); if (ptr) diff --git a/tests/test_cond_branch_mispredictions.c b/tests/test_cond_branch_mispredictions.c index c9b9c15b..7543187d 100644 --- a/tests/test_cond_branch_mispredictions.c +++ b/tests/test_cond_branch_mispredictions.c @@ -26,6 +26,7 @@ */ #include #include +#include #include #include @@ -204,7 +205,7 @@ void __aligned(PAGE_SIZE) test_cond_forward_branch_cl1(unsigned iterations) { int __aligned(PAGE_SIZE) test_cond_branch_mispredictions(void *unused) { frame_t *frame = get_free_frame(); - channel = vmap_4k(_ptr(CHANNEL_ADDR), frame->mfn, L1_PROT); + channel = vmap_kern_4k(_ptr(CHANNEL_ADDR), frame->mfn, L1_PROT); cl0 = &channel->lines[CACHE_LINE1]; /* CACHE_LINE_0_ADDR */ cl1 = &channel->lines[CACHE_LINE2]; /* CACHE_LINE_1_ADDR */ diff --git a/tests/test_uncond_branch_mispredictions.c b/tests/test_uncond_branch_mispredictions.c index 6be92edf..d8afd39f 100644 --- a/tests/test_uncond_branch_mispredictions.c +++ b/tests/test_uncond_branch_mispredictions.c @@ -26,6 +26,7 @@ */ #include #include +#include #include #include @@ -220,7 +221,7 @@ void __aligned(PAGE_SIZE) test_uncond_forward_branch_cl1(unsigned iterations) { int __aligned(PAGE_SIZE) test_uncond_branch_mispredictions(void *unused) { frame_t *frame = get_free_frame(); - channel = vmap_4k(_ptr(CHANNEL_ADDR), frame->mfn, L1_PROT); + channel = vmap_kern_4k(_ptr(CHANNEL_ADDR), frame->mfn, L1_PROT); cl0 = &channel->lines[CACHE_LINE1]; /* CACHE_LINE_0_ADDR */ cl1 = &channel->lines[CACHE_LINE2]; /* CACHE_LINE_1_ADDR */ diff --git a/tests/unittests.c b/tests/unittests.c index f0b321fb..04837823 100644 --- a/tests/unittests.c +++ b/tests/unittests.c @@ -26,17 +26,16 @@ #include #include 
#include +#include #include #include #include +#include #include #include #include #include -#include -#include - static char opt_string[4]; string_cmd("string", opt_string); @@ -217,7 +216,7 @@ int unit_tests(void *_unused) { task_user4 = new_user_task("test4 user", test_user_task_func4, NULL); frame_t *frame = get_free_frame(); - vmap_4k(HIGH_USER_PTR + 0x1000, frame->mfn, L1_PROT); + vmap_kern_4k(HIGH_USER_PTR + 0x1000, frame->mfn, L1_PROT); memset(HIGH_USER_PTR + 0x1000, 0, 0x1000); vmap_user_4k(HIGH_USER_PTR, frame->mfn, L1_PROT_USER); From 4162c09d0e07a450e0859a13f5d5090c66ecbd4f Mon Sep 17 00:00:00 2001 From: Pawel Wieczorkiewicz Date: Tue, 21 Nov 2023 16:12:52 +0100 Subject: [PATCH 3/4] arch,pt: add vmap_range() and vunmap_range() API This API automatically maps (or unmaps) a range of physical addresses specified with a start address and length. Both vmap_range() and vunmap_range() can operate on the physical address range in multiple different memory areas at once, specified via the vmap_flags parameter. Supported memory areas (via vmap_flags): * VMAP_IDENT - kernel address space, identity mapping * VMAP_KERNEL - high kernel addresses (small area) * VMAP_KERNEL_MAP - low kernel addresses (large area) * VMAP_KERNEL_USER - kernel address space, but user memory area * VMAP_KERNEL_USER_ACCESS - kernel address space, but user memory area with user access permission * VMAP_USER - user address space, user memory area * VMAP_USER_IDENT - user address space, identity mapping, no user access * VMAP_USER_KERNEL - user address space, high kernel addresses, no user access * VMAP_USER_KERNEL_MAP - user address space, low kernel addresses, no user access vmap_range() tries to use as many huge pages as possible for the mapping. Signed-off-by: Pawel Wieczorkiewicz --- arch/x86/boot/multiboot.c | 7 +- arch/x86/pagetables.c | 251 +++++++++++++++++++++++++++++++++-- common/acpi.c | 57 +++----- common/setup.c | 31 +---- drivers/fb/fb.c | 6 +- drivers/vga.c | 7 +- include/arch/x86/page.h | 34 +++++ include/arch/x86/pagetable.h | 4 + 8 files changed, 307 insertions(+), 90 deletions(-) diff --git a/arch/x86/boot/multiboot.c b/arch/x86/boot/multiboot.c index 4a7e4426..e05db83f 100644 --- a/arch/x86/boot/multiboot.c +++ b/arch/x86/boot/multiboot.c @@ -184,12 +184,7 @@ void init_multiboot(unsigned long *addr, const char **cmdline) { void map_multiboot_areas(void) { paddr_t mbi_start = _paddr(multiboot2_hdr); - paddr_t mbi_stop = mbi_start + multiboot2_hdr_size; - - for (mfn_t mfn = paddr_to_mfn(mbi_start); mfn <= paddr_to_mfn(mbi_stop); mfn++) { - vmap_kern_4k(mfn_to_virt(mfn), mfn, L1_PROT_RO); - vmap_kern_4k(mfn_to_virt_kern(mfn), mfn, L1_PROT_RO); - } + vmap_range(mbi_start, multiboot2_hdr_size, L1_PROT_RO, VMAP_KERNEL | VMAP_IDENT); } unsigned mbi_get_avail_memory_ranges_num(void) { diff --git a/arch/x86/pagetables.c b/arch/x86/pagetables.c index 1fe57971..684a106b 100644 --- a/arch/x86/pagetables.c +++ b/arch/x86/pagetables.c @@ -618,6 +618,238 @@ frame_t *find_user_va_frame(const void *va) { return find_va_frame(&user_cr3, va); } +static inline void *_vmap_range_chunk(cr3_t *cr3_ptr, void *va, mfn_t mfn, + unsigned int order, unsigned long flags, + bool propagate_user) { + switch (order) { + case PAGE_ORDER_4K: + return _vmap_4k(cr3_ptr, va, mfn, flags, propagate_user); + case PAGE_ORDER_2M: + return _vmap_2m(cr3_ptr, va, mfn, flags, propagate_user); + case PAGE_ORDER_1G: + return _vmap_1g(cr3_ptr, va, mfn, flags, propagate_user); + default: + BUG(); + } + + return NULL; +} + +static inline
int _vmap_range(mfn_t mfn, unsigned int order, unsigned long flags, + vmap_flags_t vmap_flags) { + const int err = -EFAULT; + + /* NOTE: It might make sense to unmap partially completed mappings in case of an + * error. For now, we just return an error and let the caller handle it. + */ + if (vmap_flags & VMAP_KERNEL) { + if (!_vmap_range_chunk(&cr3, mfn_to_virt_kern(mfn), mfn, order, flags, false)) + return err; + } + + if (vmap_flags & VMAP_IDENT) { + if (virt_invalid( + _vmap_range_chunk(&cr3, mfn_to_virt(mfn), mfn, order, flags, false))) + return err; + } + + if (vmap_flags & VMAP_KERNEL_MAP) { + if (!_vmap_range_chunk(&cr3, mfn_to_virt_map(mfn), mfn, order, flags, false)) + return err; + } + + if (vmap_flags & (VMAP_KERNEL_USER | VMAP_KERNEL_USER_ACCESS)) { + unsigned long _flags = flags & ~_PAGE_USER; + if (vmap_flags & VMAP_KERNEL_USER_ACCESS) + _flags |= _PAGE_USER; + + if (!_vmap_range_chunk(&cr3, mfn_to_virt_user(mfn), mfn, order, _flags, false)) + return err; + } + + if (vmap_flags & VMAP_USER) { + if (!_vmap_range_chunk(&user_cr3, mfn_to_virt_user(mfn), mfn, order, + flags | _PAGE_USER, true)) + return err; + } + + if (vmap_flags & VMAP_USER_IDENT) { + if (!_vmap_range_chunk(&user_cr3, mfn_to_virt(mfn), mfn, order, + flags & ~_PAGE_USER, false)) + return err; + } + + if (vmap_flags & VMAP_USER_KERNEL) { + if (!_vmap_range_chunk(&user_cr3, mfn_to_virt_kern(mfn), mfn, order, + flags & ~_PAGE_USER, false)) + return err; + } + + if (vmap_flags & VMAP_USER_KERNEL_MAP) { + if (!_vmap_range_chunk(&user_cr3, mfn_to_virt_map(mfn), mfn, order, + flags & ~_PAGE_USER, false)) + return err; + } + + return 0; +} + +int vmap_range(paddr_t paddr, size_t size, unsigned long flags, vmap_flags_t vmap_flags) { + paddr_t cur = paddr; + paddr_t end = cur + size; + mfn_t mfn; + int err; + + dprintk("%s: paddr: 0x%p, size: %lx\n", __func__, paddr, size); + + if (!has_vmap_flags(vmap_flags)) + return -EINVAL; + + /* Round up to the next page boundary unless it is page aligned */ + end = paddr_round_up(end); + if (end <= cur) + return -EINVAL; + + spin_lock(&vmap_lock); + while (cur < end) { + mfn = paddr_to_mfn(cur); + + if (size >= PAGE_SIZE_1G && !(cur % PAGE_SIZE_1G)) { + err = _vmap_range(mfn, PAGE_ORDER_1G, flags, vmap_flags); + if (err < 0) + goto unlock; + + cur += PAGE_SIZE_1G; + size -= PAGE_SIZE_1G; + continue; + } + + if (size >= PAGE_SIZE_2M && !(cur % PAGE_SIZE_2M)) { + err = _vmap_range(mfn, PAGE_ORDER_2M, flags, vmap_flags); + if (err < 0) + goto unlock; + + cur += PAGE_SIZE_2M; + size -= PAGE_SIZE_2M; + continue; + } + + err = _vmap_range(mfn, PAGE_ORDER_4K, flags, vmap_flags); + if (err < 0) + goto unlock; + + cur += PAGE_SIZE; + size -= PAGE_SIZE; + } + + BUG_ON(paddr_to_mfn(cur) != paddr_to_mfn(end)); + err = 0; + +unlock: + spin_unlock(&vmap_lock); + return err; +} + +static inline int _vunmap_range_chunk(cr3_t *cr3_ptr, void *va, unsigned int *order) { + mfn_t mfn = MFN_INVALID; + int err; + + err = _vunmap(cr3_ptr, va, &mfn, order); + if (err) + return err; + + if (mfn_invalid(mfn)) + return -ENOENT; + + BUG_ON(*order != PAGE_ORDER_4K && *order != PAGE_ORDER_2M && *order != PAGE_ORDER_1G); + return 0; +} + +static inline int _vunmap_range(cr3_t *cr3_ptr, void *start, void *end) { + unsigned int order = PAGE_ORDER_4K; + + for (void *cur = start; cur < end; cur += ORDER_TO_SIZE(order)) { + int err = _vunmap_range_chunk(cr3_ptr, cur, &order); + if (err) + return err; + } + + return 0; +} + +int vunmap_range(paddr_t paddr, size_t size, vmap_flags_t vmap_flags) { + paddr_t start = paddr;
+ paddr_t end = start + size; + int err; + + dprintk("%s: paddr: 0x%p, size: %lx\n", __func__, paddr, size); + + if (!has_vmap_flags(vmap_flags)) + return -EINVAL; + + /* Round up to the next page boundary unless it is page aligned */ + end = paddr_round_up(end); + if (end <= start) + return -EINVAL; + + spin_lock(&vmap_lock); + + if (vmap_flags & VMAP_KERNEL) { + err = _vunmap_range(&cr3, paddr_to_virt_kern(start), paddr_to_virt_kern(end)); + if (err < 0) + goto unlock; + } + + if (vmap_flags & VMAP_IDENT) { + err = _vunmap_range(&cr3, paddr_to_virt(start), paddr_to_virt(end)); + if (err < 0) + goto unlock; + } + + if (vmap_flags & VMAP_KERNEL_MAP) { + err = _vunmap_range(&cr3, paddr_to_virt_map(start), paddr_to_virt_map(end)); + if (err < 0) + goto unlock; + } + + if (vmap_flags & (VMAP_KERNEL_USER | VMAP_KERNEL_USER_ACCESS)) { + err = _vunmap_range(&cr3, paddr_to_virt_user(start), paddr_to_virt_user(end)); + if (err < 0) + goto unlock; + } + + if (vmap_flags & VMAP_USER) { + err = + _vunmap_range(&user_cr3, paddr_to_virt_user(start), paddr_to_virt_user(end)); + if (err < 0) + goto unlock; + } + + if (vmap_flags & VMAP_USER_IDENT) { + err = _vunmap_range(&user_cr3, paddr_to_virt(start), paddr_to_virt(end)); + if (err < 0) + goto unlock; + } + + if (vmap_flags & VMAP_USER_KERNEL) { + err = + _vunmap_range(&user_cr3, paddr_to_virt_kern(start), paddr_to_virt_kern(end)); + if (err < 0) + goto unlock; + } + + if (vmap_flags & VMAP_USER_KERNEL_MAP) { + err = _vunmap_range(&user_cr3, paddr_to_virt_map(start), paddr_to_virt_map(end)); + if (err < 0) + goto unlock; + } + + err = 0; +unlock: + spin_unlock(&vmap_lock); + return err; +} + static inline void init_cr3(cr3_t *cr3_ptr) { memset(cr3_ptr, 0, sizeof(*cr3_ptr)); cr3_ptr->mfn = MFN_INVALID; @@ -867,26 +1099,23 @@ void init_pagetables(void) { init_tmp_mapping(); for_each_memory_range (r) { + vmap_flags_t flags = VMAP_NONE; + switch (r->base) { case VIRT_IDENT_BASE: - for (mfn_t mfn = virt_to_mfn(r->start); mfn < virt_to_mfn(r->end); mfn++) - vmap_kern_4k(mfn_to_virt(mfn), mfn, r->flags); + flags = VMAP_IDENT; break; case VIRT_KERNEL_BASE: - for (mfn_t mfn = virt_to_mfn(r->start); mfn < virt_to_mfn(r->end); mfn++) - vmap_kern_4k(mfn_to_virt_kern(mfn), mfn, r->flags); + flags = VMAP_KERNEL; break; case VIRT_USER_BASE: - for (mfn_t mfn = virt_to_mfn(r->start); mfn < virt_to_mfn(r->end); mfn++) { - void *va = mfn_to_virt_user(mfn); - - vmap_kern_4k(va, mfn, r->flags); - vmap_user_4k(va, mfn, r->flags); - } + flags = VMAP_USER | VMAP_KERNEL_USER; break; default: - break; + continue; } + + vmap_range(virt_to_paddr(r->start), r->end - r->start, r->flags, flags); } map_frames_array(); diff --git a/common/acpi.c b/common/acpi.c index 1919c70f..5d2a3d55 100644 --- a/common/acpi.c +++ b/common/acpi.c @@ -135,29 +135,14 @@ static rsdp_rev1_t *acpi_find_rsdp(void) { return NULL; } -static unsigned acpi_table_map_pages(paddr_t pa, size_t len) { - unsigned offset = pa & ~PAGE_MASK; - unsigned num_pages = ((offset + len) / PAGE_SIZE) + 1; - mfn_t mfn = paddr_to_mfn(pa); - - for (unsigned i = 0; i < num_pages; i++, mfn++) { - if (mfn_invalid(mfn)) { - panic("ACPI table at %p of length %lx has invalid MFN: %lx", _ptr(pa), len, - mfn); - } - - BUG_ON(!kmap_4k(mfn, L1_PROT)); - } - - return num_pages; +static void *acpi_table_map_pages(paddr_t pa, size_t len) { + if (vmap_range(pa, len, L1_PROT, VMAP_KERNEL_MAP) < 0) + return NULL; + return paddr_to_virt_map(pa); } -static void acpi_table_unmap_pages(void *addr, unsigned mapped_pages) { - mfn_t mfn = 
virt_to_mfn(addr); - - for (unsigned i = 0; i < mapped_pages; i++, mfn++) { - vunmap_kern(mfn_to_virt_kern(mfn), NULL, NULL); - } +static void acpi_table_unmap_pages(void *addr, size_t len) { + vunmap_range(virt_to_paddr(addr), len, VMAP_KERNEL_MAP); } static inline void acpi_dump_table(const void *tab, const acpi_table_hdr_t *hdr) { @@ -170,38 +155,35 @@ static inline void acpi_dump_table(const void *tab, const acpi_table_hdr_t *hdr) static rsdt_t *acpi_find_rsdt(const rsdp_rev1_t *rsdp) { paddr_t pa = rsdp->rsdt_paddr; - unsigned mapped_pages; + size_t map_size = PAGE_SIZE; rsdt_t *rsdt; - mapped_pages = acpi_table_map_pages(pa, PAGE_SIZE); - rsdt = paddr_to_virt_kern(pa); + rsdt = acpi_table_map_pages(pa, map_size); BUG_ON(!rsdt); if (RSDT_SIGNATURE != rsdt->header.signature) goto error; - mapped_pages = acpi_table_map_pages(pa, rsdt->header.length); - rsdt = paddr_to_virt_kern(pa); + map_size = rsdt->header.length; + rsdt = acpi_table_map_pages(pa, map_size); BUG_ON(!rsdt); - if (get_checksum(rsdt, rsdt->header.length) != 0x0) + if (get_checksum(rsdt, map_size) != 0x0) goto error; acpi_dump_table(rsdt, &rsdt->header); return rsdt; error: - acpi_table_unmap_pages(rsdt, mapped_pages); + acpi_table_unmap_pages(rsdt, map_size); return NULL; } static xsdt_t *acpi_find_xsdt(const rsdp_rev2_t *rsdp) { uint32_t tab_len = rsdp->length; paddr_t pa = rsdp->xsdt_paddr; - unsigned mapped_pages; xsdt_t *xsdt; - mapped_pages = acpi_table_map_pages(pa, tab_len); - xsdt = paddr_to_virt_kern(pa); + xsdt = acpi_table_map_pages(pa, tab_len); if (XSDT_SIGNATURE != xsdt->header.signature) goto error; @@ -212,7 +194,7 @@ static xsdt_t *acpi_find_xsdt(const rsdp_rev2_t *rsdp) { acpi_dump_table(xsdt, &xsdt->header); return xsdt; error: - acpi_table_unmap_pages(xsdt, mapped_pages); + acpi_table_unmap_pages(xsdt, tab_len); return NULL; } @@ -410,26 +392,23 @@ int init_acpi(void) { for (unsigned int i = 0; i < acpi_nr_tables; i++) { paddr_t pa = (rsdt) ? 
rsdt->entry[i] : xsdt->entry[i]; - unsigned mapped_pages; + uint32_t tab_len = PAGE_SIZE; acpi_table_t *tab; - uint32_t tab_len; /* Map at least a header of the ACPI table */ - mapped_pages = acpi_table_map_pages(pa, PAGE_SIZE); - tab = paddr_to_virt_kern(pa); + tab = acpi_table_map_pages(pa, tab_len); BUG_ON(!tab); /* Find ACPI table actual length */ tab_len = tab->header.length; /* Map entire ACPI table */ - mapped_pages = acpi_table_map_pages(pa, tab_len); - tab = paddr_to_virt_kern(pa); + tab = acpi_table_map_pages(pa, tab_len); BUG_ON(!tab); /* Verify ACPI table checksum and unmap when invalid */ if (get_checksum(tab, tab->header.length) != 0x0) - acpi_table_unmap_pages(tab, mapped_pages); + acpi_table_unmap_pages(tab, tab_len); acpi_tables[max_acpi_tables++] = tab; } diff --git a/common/setup.c b/common/setup.c index 9bddd000..f6037198 100644 --- a/common/setup.c +++ b/common/setup.c @@ -117,36 +117,19 @@ static __always_inline void zero_bss(void) { void zap_boot_mappings(void) { for_each_memory_range (r) { if (r->base == VIRT_IDENT_BASE && IS_INIT_SECTION(r->name)) { - unsigned int order = PAGE_ORDER_4K; - memset(r->start, 0, r->end - r->start); - for (void *va = r->start; va < r->end; va += ORDER_TO_SIZE(order)) { - mfn_t mfn; - - if (vunmap_kern(va, &mfn, &order)) { - /* FIXME: Use warning */ - printk("Unable to unmap kernel boot mapping at %p\n", va); - order = PAGE_ORDER_4K; - continue; - } - reclaim_frame(mfn, order); - } + vunmap_range(_paddr(r->start), _paddr(r->end - r->start), VMAP_IDENT); + for (mfn_t mfn = virt_to_mfn(r->start); mfn < virt_to_mfn(r->end); mfn++) + reclaim_frame(mfn, PAGE_ORDER_4K); } } } static void __text_init map_bios_area(void) { - vmap_kern_4k(paddr_to_virt(BDA_ADDR_START), paddr_to_mfn(BDA_ADDR_START), L1_PROT_RO); - vmap_kern_4k(paddr_to_virt_kern(BDA_ADDR_START), paddr_to_mfn(BDA_ADDR_START), - L1_PROT_RO); - - uint32_t ebda_addr = get_bios_ebda_addr(); - vmap_kern_4k(paddr_to_virt(ebda_addr), paddr_to_mfn(ebda_addr), L1_PROT_RO); - vmap_kern_4k(paddr_to_virt_kern(ebda_addr), paddr_to_mfn(ebda_addr), L1_PROT_RO); - - for (mfn_t bios_mfn = paddr_to_mfn(BIOS_ACPI_ROM_START); - bios_mfn < paddr_to_mfn(BIOS_ACPI_ROM_STOP); bios_mfn++) - vmap_kern_4k(mfn_to_virt_kern(bios_mfn), bios_mfn, L1_PROT_RO); + vmap_range(BDA_ADDR_START, PAGE_SIZE, L1_PROT_RO, VMAP_IDENT | VMAP_KERNEL); + vmap_range(get_bios_ebda_addr(), PAGE_SIZE, L1_PROT_RO, VMAP_IDENT | VMAP_KERNEL); + vmap_range(BIOS_ACPI_ROM_START, BIOS_ACPI_ROM_STOP - BIOS_ACPI_ROM_START, L1_PROT_RO, + VMAP_KERNEL); } static void display_cpu_info(void) { diff --git a/drivers/fb/fb.c b/drivers/fb/fb.c index c2e8c58d..e1dcd0a6 100644 --- a/drivers/fb/fb.c +++ b/drivers/fb/fb.c @@ -54,11 +54,7 @@ static uint64_t line_width; static void (*put_pixel)(uint32_t x, uint32_t y, uint32_t color); static void map_fb_area(paddr_t start, size_t size) { - for (mfn_t video_mfn = paddr_to_mfn(start); video_mfn < paddr_to_mfn(start + size); - video_mfn++) { - vmap_kern_4k(mfn_to_virt(video_mfn), video_mfn, L1_PROT_NOCACHE); - vmap_kern_4k(mfn_to_virt_kern(video_mfn), video_mfn, L1_PROT_NOCACHE); - } + vmap_range(start, size, L1_PROT_NOCACHE, VMAP_KERNEL | VMAP_IDENT); } static void put_pixel8(uint32_t x, uint32_t y, uint32_t color) { diff --git a/drivers/vga.c b/drivers/vga.c index d679bf73..42b35363 100644 --- a/drivers/vga.c +++ b/drivers/vga.c @@ -81,9 +81,6 @@ void vga_write(void *vga_memory, const char *buf, size_t len, vga_color_t color) } void map_vga_area(void) { - for (mfn_t vga_mfn = paddr_to_mfn(VGA_START_ADDR); 
- vga_mfn < paddr_to_mfn(VGA_END_ADDR); vga_mfn++) { - vmap_kern_4k(mfn_to_virt(vga_mfn), vga_mfn, L1_PROT_NOCACHE); - vmap_kern_4k(mfn_to_virt_kern(vga_mfn), vga_mfn, L1_PROT_NOCACHE); - } + vmap_range(VGA_START_ADDR, VGA_END_ADDR - VGA_START_ADDR, L1_PROT_NOCACHE, + VMAP_IDENT | VMAP_KERNEL); } diff --git a/include/arch/x86/page.h b/include/arch/x86/page.h index 13194646..35cfd688 100644 --- a/include/arch/x86/page.h +++ b/include/arch/x86/page.h @@ -147,6 +147,24 @@ enum pat_memory_type { }; typedef enum pat_memory_type pat_memory_type_t; +/* clang-format off */ +enum vmap_flags { + VMAP_NONE = 0x00000000, + VMAP_IDENT = 0x00000001, + VMAP_KERNEL = 0x00000002, + VMAP_KERNEL_MAP = 0x00000004, + VMAP_KERNEL_USER = 0x00000008, + VMAP_KERNEL_USER_ACCESS = 0x00000010, + VMAP_USER = 0x00000020, + VMAP_USER_IDENT = 0x00000040, + VMAP_USER_KERNEL = 0x00000080, + VMAP_USER_KERNEL_MAP = 0x00000100, + + VMAP_ALL = 0x000001ff, +}; +/* clang-format on */ +typedef enum vmap_flags vmap_flags_t; + typedef unsigned long paddr_t; typedef unsigned long mfn_t; @@ -180,6 +198,22 @@ extern pat_memory_type_t pat_get_type(pat_field_t field); /* Static declarations */ +static inline bool virt_invalid(const void *va) { + return !va || va == MAP_FAILED; +} + +static inline paddr_t paddr_round_up(paddr_t pa) { + return (pa + PAGE_SIZE - 1) & PAGE_MASK; +} + +static inline void *virt_round_up(void *va) { + return (void *) paddr_round_up(_paddr(va)); +} + +static inline bool has_vmap_flags(vmap_flags_t flags) { + return (flags & VMAP_ALL) != VMAP_NONE; +} + static inline mfn_t paddr_to_mfn(paddr_t pa) { return (mfn_t)(pa >> PAGE_SHIFT); } diff --git a/include/arch/x86/pagetable.h b/include/arch/x86/pagetable.h index a404000f..92881032 100644 --- a/include/arch/x86/pagetable.h +++ b/include/arch/x86/pagetable.h @@ -182,6 +182,10 @@ extern void *vmap_4k(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned long l1_flags extern int vunmap_kern(void *va, mfn_t *mfn, unsigned int *order); extern int vunmap_user(void *va, mfn_t *mfn, unsigned int *order); +extern int vmap_range(paddr_t paddr, size_t size, unsigned long flags, + vmap_flags_t vmap_flags); +extern int vunmap_range(paddr_t paddr, size_t size, vmap_flags_t vmap_flags); + /* Static declarations */ static inline pt_index_t l1_table_index(const void *va) { From 3ae96f1e600f3e13e948f111c513cee278c5f186 Mon Sep 17 00:00:00 2001 From: Pawel Wieczorkiewicz Date: Tue, 21 Nov 2023 16:13:13 +0100 Subject: [PATCH 4/4] mm,vmm: improve get_free_pages() implementation Use vmap_range() to handle multiple mapping areas automatically and consistently. Standardize rules for virtual address returned by get_free_pages(). 
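For example (an illustrative sketch, not part of the patch itself; all identifiers are taken from this series): a caller combining several GFP areas now gets back the virtual address of the highest-priority area, as implemented by gfp_mfn_to_virt() below:

    /* Allocate one 4K frame and map it in both the kernel-map and
     * identity areas; gfp_mfn_to_virt() prefers GFP_KERNEL_MAP, so the
     * returned va is the frame's mfn_to_virt_map() address.
     */
    void *va = get_free_pages(PAGE_ORDER_4K, GFP_KERNEL_MAP | GFP_IDENT);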
Signed-off-by: Pawel Wieczorkiewicz --- include/arch/x86/page.h | 12 +++++ include/mm/vmm.h | 9 ++-- mm/vmm.c | 98 ++++++++++++++++++++++++++++------------- 3 files changed, 86 insertions(+), 33 deletions(-) diff --git a/include/arch/x86/page.h b/include/arch/x86/page.h index 35cfd688..3de15428 100644 --- a/include/arch/x86/page.h +++ b/include/arch/x86/page.h @@ -275,6 +275,18 @@ static inline mfn_t virt_to_mfn(const void *va) { return paddr_to_mfn(virt_to_paddr(va)); } +static inline unsigned long order_to_flags(unsigned int order) { + switch (order) { + case PAGE_ORDER_2M: + return L2_PROT; + case PAGE_ORDER_1G: + return L3_PROT; + case PAGE_ORDER_4K: + default: + return L1_PROT; + } +} + #endif /* __ASSEMBLY__ */ #endif /* KTF_PAGE_H */ diff --git a/include/mm/vmm.h b/include/mm/vmm.h index 3d14b4e8..1baf0587 100644 --- a/include/mm/vmm.h +++ b/include/mm/vmm.h @@ -27,12 +27,15 @@ #include +/* clang-format off */ enum gfp_flags { - GFP_KERNEL = 0x00000001, - GFP_USER = 0x00000002, - GFP_IDENT = 0x00000004, + GFP_NONE = 0x00000000, + GFP_IDENT = 0x00000001, + GFP_USER = 0x00000002, + GFP_KERNEL = 0x00000004, GFP_KERNEL_MAP = 0x00000008, }; +/* clang-format on */ typedef enum gfp_flags gfp_flags_t; /* External definitions */ diff --git a/mm/vmm.c b/mm/vmm.c index 193f63b1..ff3ae70d 100644 --- a/mm/vmm.c +++ b/mm/vmm.c @@ -33,10 +33,70 @@ /* Used by higher level mmap_range() functions - must be taken before vmap_lock */ static spinlock_t mmap_lock = SPINLOCK_INIT; -void *get_free_pages(unsigned int order, gfp_flags_t flags) { - frame_t *frame; +static inline vmap_flags_t gfp_to_vmap_flags(gfp_flags_t gfp_flags) { + vmap_flags_t vmap_flags = VMAP_NONE; + + if (gfp_flags == GFP_USER) + return VMAP_KERNEL_USER | VMAP_USER; + + if (gfp_flags & GFP_IDENT) { + vmap_flags |= VMAP_IDENT; + if (gfp_flags & GFP_USER) + vmap_flags |= VMAP_USER_IDENT; + } + + if (gfp_flags & GFP_KERNEL) { + vmap_flags |= VMAP_KERNEL; + if (gfp_flags & GFP_USER) + vmap_flags |= VMAP_USER_KERNEL; + } + + if (gfp_flags & GFP_KERNEL_MAP) { + vmap_flags |= VMAP_KERNEL_MAP; + if (gfp_flags & GFP_USER) + vmap_flags |= VMAP_USER_KERNEL_MAP; + } + + return vmap_flags; +} + +static inline void *gfp_mfn_to_virt(gfp_flags_t gfp_flags, mfn_t mfn) { + /* Return virtual address if a single area is specified ... */ + switch (gfp_flags) { + case GFP_IDENT: + return mfn_to_virt(mfn); + case GFP_KERNEL_MAP: + return mfn_to_virt_map(mfn); + case GFP_USER: + return mfn_to_virt_user(mfn); + case GFP_KERNEL: + return mfn_to_virt_kern(mfn); + default: + /* Otherwise, return kernel addresses if specified before identity + * mapping or user. The below order reflects most common uses. 
+ */ + if (gfp_flags & GFP_KERNEL_MAP) + return mfn_to_virt_map(mfn); + else if (gfp_flags & GFP_KERNEL) + return mfn_to_virt_kern(mfn); + else if (gfp_flags & GFP_IDENT) + return mfn_to_virt(mfn); + else if (gfp_flags & GFP_USER) + return mfn_to_virt_user(mfn); + } + + return NULL; +} + +void *get_free_pages(unsigned int order, gfp_flags_t gfp_flags) { void *va = NULL; + frame_t *frame; mfn_t mfn; + size_t size; + unsigned long pt_flags; + vmap_flags_t vmap_flags; + + ASSERT(gfp_flags != GFP_NONE); if (!boot_flags.virt) panic("Unable to use %s() before final page tables are set", __func__); @@ -46,35 +106,13 @@ void *get_free_pages(unsigned int order, gfp_flags_t flags) { return va; mfn = frame->mfn; - spin_lock(&mmap_lock); - if (flags == GFP_USER) { - va = vmap_kern(mfn_to_virt_user(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, - L1_PROT); - vmap_user(mfn_to_virt_user(mfn), mfn, order, L4_PROT_USER, L3_PROT_USER, - L2_PROT_USER, L1_PROT_USER); - } - - if (flags & GFP_IDENT) { - va = vmap_kern(mfn_to_virt(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT); - if (flags & GFP_USER) - vmap_user(mfn_to_virt(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, L1_PROT); - } - - if (flags & GFP_KERNEL) { - va = vmap_kern(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, - L1_PROT); - if (flags & GFP_USER) - vmap_user(mfn_to_virt_kern(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, - L1_PROT); - } + size = ORDER_TO_SIZE(order); + pt_flags = order_to_flags(order); + vmap_flags = gfp_to_vmap_flags(gfp_flags); - if (flags & GFP_KERNEL_MAP) { - va = vmap_kern(mfn_to_virt_map(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, - L1_PROT); - if (flags & GFP_USER) - vmap_user(mfn_to_virt_map(mfn), mfn, order, L4_PROT, L3_PROT, L2_PROT, - L1_PROT); - } + spin_lock(&mmap_lock); + if (vmap_range(mfn_to_paddr(mfn), size, pt_flags, vmap_flags) == 0) + va = gfp_mfn_to_virt(gfp_flags, mfn); spin_unlock(&mmap_lock); return va;