riscv: Map the kernel with correct permissions the first time
For 64-bit kernels, we map the whole kernel with write and execute
permissions and afterwards remove writability from text and executability
from data.

For 32-bit kernels, the kernel mapping resides in the linear mapping, so we
map the whole linear mapping as writable and executable and afterwards we
remove those properties from unused memory and from the kernel mapping as
described above.

Change this behavior to directly map the kernel with the correct
permissions and avoid a pass over the whole mapping to fix the permissions
afterwards.

At the same time, this fixes an issue introduced by commit 2bfc6cd
("riscv: Move kernel mapping outside of linear mapping"), as reported
at starfive-tech/linux#17.

Signed-off-by: Alexandre Ghiti <[email protected]>
Reviewed-by: Anup Patel <[email protected]>
AlexGhiti committed Jun 24, 2021
1 parent 3f20285 commit 7fca812
Showing 5 changed files with 81 additions and 81 deletions.
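
The core of the change is the pgprot_from_va() helper added to
arch/riscv/mm/init.c below: rather than mapping everything writable and
executable and fixing protections afterwards with the set_memory_*() calls
being deleted here, the page-table code now asks, for each virtual address,
which protection to install the first time. A minimal userspace sketch of
that pattern (the region bounds and protection names are invented for
illustration, not taken from the kernel):

/*
 * Toy model of "map with the right permissions the first time": each
 * address is mapped once with its final protection instead of being
 * remapped later.  All bounds and names are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

enum prot { PROT_RX, PROT_RW };

static const uint64_t text_start = 0x1000, text_end = 0x4000;
static const uint64_t image_end = 0x8000;

/* Same shape as the pgprot_from_va() added below: called per VA by the
 * mapping loop, so no permission fixup pass is needed afterwards. */
static enum prot prot_from_va(uint64_t va)
{
	if (va >= text_start && va < text_end)
		return PROT_RX;	/* text: read + execute, never writable */
	return PROT_RW;		/* data: read + write, never executable */
}

int main(void)
{
	for (uint64_t va = text_start; va < image_end; va += 0x1000)
		printf("map 0x%llx prot=%d\n",
		       (unsigned long long)va, prot_from_va(va));
	return 0;
}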
13 changes: 12 additions & 1 deletion arch/riscv/include/asm/page.h
@@ -87,6 +87,7 @@ extern unsigned long va_kernel_pa_offset;
 extern unsigned long va_kernel_xip_pa_offset;
 #endif
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
 #define va_pa_offset	0
@@ -99,6 +100,11 @@ extern unsigned long pfn_base;
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
 #ifdef CONFIG_XIP_KERNEL
 #define kernel_mapping_pa_to_va(y)	({						\
@@ -125,10 +131,15 @@ extern unsigned long kernel_virt_addr;
 #endif
 #define __va_to_pa_nodebug(x)	({						\
 	unsigned long _x = x;							\
-	(_x < kernel_virt_addr) ?						\
+	is_linear_mapping(_x) ?							\
 		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
 	})
 #else
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)  ((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)  ((unsigned long)(x) - va_pa_offset)
 #endif
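
The two predicates added above partition the 64-bit address space: the
linear mapping covers [PAGE_OFFSET, kernel_virt_addr) and the kernel image
covers load_sz bytes starting at kernel_virt_addr. A userspace mock of the
classification (the constants are made-up stand-ins for the kernel's
globals):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up stand-ins for the kernel's PAGE_OFFSET, kernel_virt_addr
 * and load_sz. */
static const uint64_t page_offset = 0xffffffe000000000ULL;
static const uint64_t kva = 0xffffffff80000000ULL;
static const uint64_t load_sz = 16 * 1024 * 1024;

/* Same shape as the 64-bit is_kernel_mapping()/is_linear_mapping(). */
static bool is_kernel_mapping(uint64_t x)
{
	return x >= kva && x < kva + load_sz;
}

static bool is_linear_mapping(uint64_t x)
{
	return x >= page_offset && x < kva;
}

int main(void)
{
	const uint64_t samples[] = { page_offset, kva, kva + load_sz };

	for (int i = 0; i < 3; i++)
		printf("0x%llx: kernel=%d linear=%d\n",
		       (unsigned long long)samples[i],
		       is_kernel_mapping(samples[i]),
		       is_linear_mapping(samples[i]));
	return 0;
}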
17 changes: 17 additions & 0 deletions arch/riscv/include/asm/sections.h
@@ -6,11 +6,28 @@
 #define __ASM_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <linux/mm.h>
 
 extern char _start[];
 extern char _start_kernel[];
 extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
 
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)_start;
+	uintptr_t end = (uintptr_t)__init_data_begin;
+
+	return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)lm_alias(_start);
+	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+	return va >= start && va < end;
+}
+
 #endif /* __ASM_SECTIONS_H */
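
The new <linux/mm.h> include is what provides lm_alias(). On 64-bit, a
symbol such as _start has two virtual addresses: one in the kernel image
mapping and one in the linear map. lm_alias() converts the former into the
latter, which is why the two helpers above differ only in whether they wrap
the section bounds in lm_alias(). In mainline the macro reduces to:

/* include/linux/mm.h: the linear-map alias of a kernel-image symbol,
 * i.e. its physical address re-offset into the linear mapping. */
#define lm_alias(x)	__va(__pa_symbol(x))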
8 changes: 0 additions & 8 deletions arch/riscv/include/asm/set_memory.h
@@ -16,7 +16,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
 static __always_inline int set_kernel_memory(char *startp, char *endp,
 					     int (*set_memory)(unsigned long start,
 							       int num_pages))
@@ -32,7 +31,6 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
 static inline int set_kernel_memory(char *startp, char *endp,
 				    int (*set_memory)(unsigned long start,
@@ -42,12 +40,6 @@ static inline int set_kernel_memory(char *startp, char *endp,
 }
 #endif
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void __init protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
-#endif
-
 int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
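
The surviving helper here, set_kernel_memory(), is what the commit now uses
in place of the removed prototypes: it takes a pair of section symbols plus
one set_memory_*() callback and converts the range to a page count
internally. Its kernel body is folded out of this diff; the userspace
analogue below only shows the shape implied by the call sites (the
PAGE_SHIFT value and the stub callback are assumptions):

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Stub standing in for a real set_memory_*() operation. */
static int set_memory_ro(unsigned long start, int num_pages)
{
	printf("ro: 0x%lx, %d pages\n", start, num_pages);
	return 0;
}

/* Userspace analogue of set_kernel_memory(): turn a [startp, endp)
 * symbol range into a page count and apply one permission callback. */
static int set_kernel_memory(char *startp, char *endp,
			     int (*set_memory)(unsigned long, int))
{
	unsigned long start = (unsigned long)startp;
	int num_pages = PAGE_ALIGN((unsigned long)endp - start) >> PAGE_SHIFT;

	return set_memory(start, num_pages);
}

int main(void)
{
	static char region[3 * PAGE_SIZE];

	return set_kernel_memory(region, region + sizeof(region),
				 set_memory_ro);
}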
12 changes: 3 additions & 9 deletions arch/riscv/kernel/setup.c
@@ -289,11 +289,6 @@ void __init setup_arch(char **cmdline_p)
 	init_resources();
 	sbi_init();
 
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
-		protect_kernel_text_data();
-		protect_kernel_linear_mapping_text_rodata();
-	}
-
 #ifdef CONFIG_KASAN
 	kasan_init();
 #endif
@@ -328,11 +323,10 @@ subsys_initcall(topology_init);
 
 void free_initmem(void)
 {
-	unsigned long init_begin = (unsigned long)__init_begin;
-	unsigned long init_end = (unsigned long)__init_end;
-
 	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
+				  IS_ENABLED(CONFIG_64BIT) ?
+				  set_memory_rw : set_memory_rw_nx);
 
 	free_initmem_default(POISON_FREE_INITMEM);
 }
112 changes: 49 additions & 63 deletions arch/riscv/mm/init.c
@@ -455,6 +455,43 @@ asmlinkage void __init __copy_data(void)
 }
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (is_va_kernel_text(va))
+		return PAGE_KERNEL_READ_EXEC;
+
+	/*
+	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
+	 * we must protect its linear mapping alias from being executed and
+	 * written.
+	 * And rodata section is marked readonly in mark_rodata_ro.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+		return PAGE_KERNEL_READ;
+
+	return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+	set_kernel_memory(__start_rodata, _data, set_memory_ro);
+	if (IS_ENABLED(CONFIG_64BIT))
+		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+				  set_memory_ro);
+
+	debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+		return PAGE_KERNEL;
+
+	return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 /*
  * setup_vm() is called from head.S with MMU-off.
  *
@@ -474,7 +511,7 @@ asmlinkage void __init __copy_data(void)
 #endif
 
 static uintptr_t load_pa __initdata;
-static uintptr_t load_sz __initdata;
+uintptr_t load_sz;
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa        (*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz        (*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -486,7 +523,8 @@ static uintptr_t xiprom_sz __initdata;
 #define xiprom_sz      (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom         (*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
@@ -505,15 +543,18 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 				   map_size, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    bool early)
 {
 	uintptr_t va, end_va;
 
 	end_va = kernel_virt_addr + load_sz;
 	for (va = kernel_virt_addr; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
 				   load_pa + (va - kernel_virt_addr),
-				   map_size, PAGE_KERNEL_EXEC);
+				   map_size,
+				   early ?
+					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
 
@@ -590,7 +631,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * us to reach paging_init(). We map all memory banks later
 	 * in setup_vm_final() below.
 	 */
-	create_kernel_page_table(early_pg_dir, map_size);
+	create_kernel_page_table(early_pg_dir, map_size, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	/* Setup early PMD for DTB */
@@ -666,22 +707,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void __init protect_kernel_linear_mapping_text_rodata(void)
-{
-	unsigned long text_start = (unsigned long)lm_alias(_start);
-	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
-	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
-	unsigned long data_start = (unsigned long)lm_alias(_data);
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
 static void __init setup_vm_final(void)
 {
 	uintptr_t va, map_size;
@@ -714,21 +739,15 @@ static void __init setup_vm_final(void)
 		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
 			va = (uintptr_t)__va(pa);
-			create_pgd_mapping(swapper_pg_dir, va, pa,
-					   map_size,
-#ifdef CONFIG_64BIT
-					   PAGE_KERNEL
-#else
-					   PAGE_KERNEL_EXEC
-#endif
-					   );
+			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+					   pgprot_from_va(va));
 		}
 	}
 
 #ifdef CONFIG_64BIT
 	/* Map the kernel */
-	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
 #endif
 
 	/* Clear fixmap PTE and PMD mappings */
@@ -759,39 +778,6 @@ static inline void setup_vm_final(void)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
-	unsigned long text_start = (unsigned long)_start;
-	unsigned long init_text_start = (unsigned long)__init_text_begin;
-	unsigned long init_data_start = (unsigned long)__init_data_begin;
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
-	unsigned long end_va = kernel_virt_addr + load_sz;
-#else
-	unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-#endif
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
-	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
-	/* rodata section is marked readonly in mark_rodata_ro */
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
-	debug_checkwx();
-}
-#endif
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
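
Taken together, the boot flow after this commit looks as follows (a
condensed call sketch, not compilable code; the ordering is taken from this
diff):

/* Condensed call sequence after this commit -- a sketch, not literal code. */

setup_vm(dtb_pa);
	/* Early page table: whole kernel PAGE_KERNEL_EXEC, since the
	 * permission helpers cannot run this early. */
	create_kernel_page_table(early_pg_dir, map_size, true);

setup_vm_final();
	/* Final page table: every region mapped with its final
	 * protection straight from pgprot_from_va(). */
	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);

mark_rodata_ro();
	/* rodata stays writable through init (e.g. for __ro_after_init
	 * data) and is only write-protected here. */
	set_kernel_memory(__start_rodata, _data, set_memory_ro);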
