Skip to content

Commit

Permalink
arch,pt: enable (void *) 0 address mapping
Browse files Browse the repository at this point in the history
Signed-off-by: Pawel Wieczorkiewicz <[email protected]>
  • Loading branch information
wipawel committed Nov 14, 2023
1 parent ccabf28 commit 269f265
Show file tree
Hide file tree
Showing 9 changed files with 51 additions and 18 deletions.
24 changes: 20 additions & 4 deletions arch/x86/pagetables.c
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,11 @@ void dump_user_pagetable_va(void *va) {
dump_pagetable_va(&user_cr3, va);
}

static inline void clean_pagetable(void *tab) {
for (pgentry_t *e = tab; e < (pgentry_t *) (tab + PAGE_SIZE); e++)
set_pgentry(e, MFN_INVALID, PT_NO_FLAGS);
}

static mfn_t get_cr3_mfn(cr3_t *cr3_entry) {
void *cr3_mapped = NULL;

Expand All @@ -205,7 +210,7 @@ static mfn_t get_cr3_mfn(cr3_t *cr3_entry) {

cr3_entry->mfn = frame->mfn;
cr3_mapped = tmp_map_mfn(cr3_entry->mfn);
memset(cr3_mapped, 0, PAGE_SIZE);
clean_pagetable(cr3_mapped);
}

return cr3_entry->mfn;
Expand Down Expand Up @@ -248,7 +253,7 @@ static mfn_t get_pgentry_mfn(mfn_t tab_mfn, pt_index_t index, unsigned long flag
mfn = frame->mfn;
set_pgentry(entry, mfn, flags);
tab = tmp_map_mfn(mfn);
memset(tab, 0, PAGE_SIZE);
clean_pagetable(tab);
}
else {
/* Page table already exists, but its flags may conflict with ours; fix them up if needed. */
Expand All @@ -258,6 +263,10 @@ static mfn_t get_pgentry_mfn(mfn_t tab_mfn, pt_index_t index, unsigned long flag
return mfn;
}

/* This function returns NULL when it fails to map a non-NULL virtual
 * address, MAP_FAILED when it fails to map the NULL (0x0) virtual
 * address, and otherwise the same virtual address that was passed in.
 */
static void *_vmap(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned int order,
#if defined(__x86_64__)
unsigned long l4_flags,
Expand All @@ -267,8 +276,8 @@ static void *_vmap(cr3_t *cr3_ptr, void *va, mfn_t mfn, unsigned int order,
mfn_t l1t_mfn, l2t_mfn, l3t_mfn;
pgentry_t *tab, *entry;

if (!va || (_ul(va) & ~PAGE_ORDER_TO_MASK(order)) || !is_canon_va(va))
return NULL;
if ((_ul(va) & ~PAGE_ORDER_TO_MASK(order)) || !is_canon_va(va))
return va ? NULL : MAP_FAILED;

dprintk("%s: va: 0x%p mfn: 0x%lx (order: %u)\n", __func__, va, mfn, order);

Expand Down Expand Up @@ -352,7 +361,14 @@ static void map_tmp_mapping_entry(void) {
_tmp_mapping_entry = paddr_to_virt_kern(_paddr(entry));
}

/* Reset a CR3 descriptor to a known-empty state: zero every byte of the
 * structure, then mark its page-table frame as not-yet-allocated so that
 * get_cr3_mfn() will allocate a fresh top-level table on first use.
 * NOTE(review): MFN_INVALID is presumably non-zero here, hence the
 * explicit assignment after the wipe — confirm against page.h.
 */
static inline void init_cr3(cr3_t *cr3_ptr) {
    memset(cr3_ptr, 0, sizeof(cr3_t));
    cr3_ptr->mfn = MFN_INVALID;
}

void init_pagetables(void) {
init_cr3(&cr3);
init_cr3(&user_cr3);
init_tmp_mapping();

for_each_memory_range (r) {
Expand Down
4 changes: 4 additions & 0 deletions arch/x86/traps.c
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ static void init_tss(percpu_t *percpu) {
#if defined(__i386__)
percpu->tss_df.iopb = sizeof(percpu->tss_df);
percpu->tss_df.esp0 = _ul(get_free_page_top(GFP_KERNEL));
BUG_ON(!percpu->tss_df.esp0);
percpu->tss_df.ss = __KERN_DS;
percpu->tss_df.ds = __KERN_DS;
percpu->tss_df.es = __KERN_DS;
Expand All @@ -68,11 +69,14 @@ static void init_tss(percpu_t *percpu) {

/* FIXME */
percpu->tss.esp0 = _ul(get_free_page_top(GFP_KERNEL));
BUG_ON(!percpu->tss.esp0);
percpu->tss.ss0 = __KERN_DS;
percpu->tss.cr3 = _ul(cr3.reg);
#elif defined(__x86_64__)
percpu->tss.rsp0 = _ul(get_free_page_top(GFP_KERNEL | GFP_USER));
BUG_ON(!percpu->tss.rsp0);
percpu->tss.ist[0] = _ul(get_free_page_top(GFP_KERNEL | GFP_USER));
BUG_ON(!percpu->tss.ist[0]);
#endif
percpu->tss.iopb = sizeof(percpu->tss);

Expand Down
2 changes: 1 addition & 1 deletion common/percpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -65,4 +65,4 @@ void for_each_percpu(void (*func)(percpu_t *percpu)) {

list_for_each_entry (percpu, &percpu_frames, list)
func(percpu);
}
}
5 changes: 4 additions & 1 deletion common/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -118,8 +118,11 @@ static int prepare_task(task_t *task, const char *name, task_func_t func, void *
task->func = func;
task->arg = arg;
task->type = type;
if (task->type == TASK_TYPE_USER)
if (task->type == TASK_TYPE_USER) {
task->stack = get_free_page_top(GFP_USER);
if (!task->stack)
return -ENOMEM;
}
set_task_state(task, TASK_STATE_READY);
return ESUCCESS;
}
Expand Down
6 changes: 4 additions & 2 deletions include/arch/x86/page.h
Original file line number Diff line number Diff line change
Expand Up @@ -160,8 +160,10 @@ typedef unsigned long mfn_t;

#define _paddr(addr) ((paddr_t) _ul(addr))

#define PADDR_INVALID (0UL)
#define MFN_INVALID (0UL)
#define PADDR_INVALID (~0x0UL)
#define MFN_INVALID (paddr_to_mfn(PADDR_INVALID))

#define MAP_FAILED ((void *) 1)

#define IS_ADDR_SPACE_VA(va, as) (_ul(va) >= (as))

Expand Down
15 changes: 10 additions & 5 deletions mm/pmm.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,20 +77,25 @@ static frames_array_t *new_frames_array(void) {

if (!boot_flags.virt) {
frame_t *frame = get_free_frame();
if (!frame)
goto error;
array = (frames_array_t *) mfn_to_virt_kern(frame->mfn);
}
else
else {
array = get_free_page(GFP_KERNEL);

if (!array)
panic("PMM: Unable to allocate new page for frame array");
if (!array)
goto error;
}

dprintk("%s: allocated new frames array: %p\n", __func__, array);

init_frames_array(array);

total_free_frames += array->meta.free_count;
return array;
error:
panic("PMM: Unable to allocate new page for frame array");
UNREACHABLE();
}

static void del_frames_array(frames_array_t *array) {
Expand Down Expand Up @@ -539,7 +544,7 @@ frame_t *get_free_frames(unsigned int order) {
void put_free_frames(mfn_t mfn, unsigned int order) {
frame_t *frame;

BUG_ON(mfn_invalid(mfn) || order > MAX_PAGE_ORDER);
ASSERT(order <= MAX_PAGE_ORDER);

spin_lock(&lock);
frame = find_mfn_frame(busy_frames, mfn, order);
Expand Down
4 changes: 2 additions & 2 deletions mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ meta_slab_t *slab_meta_alloc() {
* Allocate a 4K page
*/
free_page = get_free_page(GFP_KERNEL_MAP);
if (free_page == NULL) {
if (!free_page) {
dprintk("slab_meta_alloc failed, not enough free pages\n");
return NULL;
}
Expand Down Expand Up @@ -234,7 +234,7 @@ static void *ktf_alloc(size_t size) {
*/

free_page = get_free_page(GFP_KERNEL_MAP);
if (free_page == NULL) {
if (!free_page) {
dprintk("ktf_alloc failed, not enough free pages\n");
slab_free(META_SLAB_PAGE_ENTRY(meta_slab), meta_slab);
alloc = NULL;
Expand Down
8 changes: 5 additions & 3 deletions smp/mptables.c
Original file line number Diff line number Diff line change
Expand Up @@ -89,9 +89,11 @@ static mpf_t *get_mpf_addr(void) {
return NULL;

sysm_addr = kmap_4k(paddr_to_mfn(_paddr(range.end) - KB(1)), L1_PROT_RO);
ptr = find_mpf(sysm_addr, sysm_addr + KB(1));
if (ptr)
return ptr;
if (sysm_addr) {
ptr = find_mpf(sysm_addr, sysm_addr + KB(1));
if (ptr)
return ptr;
}

return find_mpf(paddr_to_virt_kern(BIOS_ROM_ADDR_START),
paddr_to_virt_kern(BIOS_ROM_ADDR_START + KB(64)));
Expand Down
1 change: 1 addition & 0 deletions smp/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@ static __text_init void boot_cpu(cpu_t *cpu) {
return;

ap_new_sp = get_free_pages_top(PAGE_ORDER_2M, GFP_KERNEL_MAP);
BUG_ON(!ap_new_sp);
ap_cpuid = cpu->id;
ap_callin = false;
smp_wmb();
Expand Down

0 comments on commit 269f265

Please sign in to comment.