From 7ff75e21fad3e851090a9f9fcb14d0411949f2f8 Mon Sep 17 00:00:00 2001
From: Shell
Date: Mon, 2 Dec 2024 18:19:47 +0800
Subject: [PATCH] feat: arm64: mmu: auto-sensing of best paging stride

Improves the memory mapping process by dynamically selecting the optimal
paging stride (4K or 2M) based on virtual address alignment and mapping
size. This eliminates the need for upfront stride determination,
enhancing flexibility and maintainability in memory management.

Changes:

- Replaced fixed stride selection logic with a dynamic decision loop.
- Removed `npages` calculation and replaced with `remaining_sz` to track
  unprocessed memory size.
- Added assertions to ensure `size` is properly aligned to the smallest
  page size.
- Adjusted loop to dynamically determine and apply the appropriate
  stride (4K or 2M) for each mapping iteration.
- Updated virtual and physical address increments to use the dynamically
  selected stride.

Signed-off-by: Shell
---
 libcpu/aarch64/common/mmu.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/libcpu/aarch64/common/mmu.c b/libcpu/aarch64/common/mmu.c
index 0bc8a0cf7f8..96722b09de0 100644
--- a/libcpu/aarch64/common/mmu.c
+++ b/libcpu/aarch64/common/mmu.c
@@ -275,27 +275,27 @@ void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
     int ret = -1;
     void *unmap_va = v_addr;
-    size_t npages;
+    size_t remaining_sz = size;
     size_t stride;
     int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);
 
-    if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (size & ARCH_SECTION_MASK))
-    {
-        /* legacy 4k mapping */
-        npages = size >> ARCH_PAGE_SHIFT;
-        stride = ARCH_PAGE_SIZE;
-        mapper = _kernel_map_4K;
-    }
-    else
-    {
-        /* 2m huge page */
-        npages = size >> ARCH_SECTION_SHIFT;
-        stride = ARCH_SECTION_SIZE;
-        mapper = _kernel_map_2M;
-    }
+    RT_ASSERT(!(size & ARCH_PAGE_MASK));
 
-    while (npages--)
+    while (remaining_sz)
     {
+        if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (remaining_sz < ARCH_SECTION_SIZE))
+        {
+            /* legacy 4k mapping */
+            stride = ARCH_PAGE_SIZE;
+            mapper = _kernel_map_4K;
+        }
+        else
+        {
+            /* 2m huge page */
+            stride = ARCH_SECTION_SIZE;
+            mapper = _kernel_map_2M;
+        }
+
         MM_PGTBL_LOCK(aspace);
         ret = mapper(aspace->page_table, v_addr, p_addr, attr);
         MM_PGTBL_UNLOCK(aspace);
@@ -314,6 +314,8 @@ void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
             }
             break;
         }
+
+        remaining_sz -= stride;
         v_addr = (char *)v_addr + stride;
         p_addr = (char *)p_addr + stride;
     }