From 2e3ee8d201f0a7b2d4ef9d6823430b6bc131ea24 Mon Sep 17 00:00:00 2001
From: Joe Richey
Date: Fri, 28 Feb 2020 21:15:52 -0800
Subject: [PATCH] pvh: Support booting via PVH ELFNOTE

This PR adds support for booting via the Xen HVM direct boot ABI. See:
https://xenbits.xen.org/docs/4.12-testing/misc/pvh.html

This uses a 32-bit unpaged entry point, so we just point it at ram32_start.
This allows our firmware to be used with QEMU's -kernel option.

Signed-off-by: Joe Richey
---
 layout.ld       |  8 ++++++
 src/asm/mod.rs  |  2 ++
 src/asm/note.s  | 20 ++++++++++++++
 src/asm/ram32.s | 71 +++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 101 insertions(+)
 create mode 100644 src/asm/note.s
 create mode 100644 src/asm/ram32.s

diff --git a/layout.ld b/layout.ld
index 5403d7d1..9b1ab3ee 100644
--- a/layout.ld
+++ b/layout.ld
@@ -3,6 +3,7 @@ ENTRY(ram64_start)
 PHDRS
 {
     program PT_LOAD FILEHDR PHDRS ;
+    note PT_NOTE ;
 }
 
 /* Loaders like to put stuff in low memory (< 1M), so we don't use it. */
@@ -10,6 +11,10 @@ ram_min = 1M;
 ram_max = 2M;
 /* Our stack grows down from ram_max. TODO: Add a guard for stack overflows. */
 stack_size = 64K;
+/* ram32.s needs 3 pages for initial page tables, use the space after ram_max */
+pml2 = ram_max;
+pml3 = pml2 + 0x1000;
+pml4 = pml3 + 0x1000;
 
 SECTIONS
 {
@@ -26,6 +31,9 @@
 
     ASSERT((. <= ram_max - stack_size), "firmware size too big for RAM region")
 
+    /* These sections are not mapped into RAM */
+    .note : { *(.note) } :note
+
     /* Match edk2's GccBase.lds DISCARD section */
     /DISCARD/ : {
         *(.note.GNU-stack)
diff --git a/src/asm/mod.rs b/src/asm/mod.rs
index e1b44166..e4711c2f 100644
--- a/src/asm/mod.rs
+++ b/src/asm/mod.rs
@@ -1 +1,3 @@
+global_asm!(include_str!("note.s"));
+global_asm!(include_str!("ram32.s"));
 global_asm!(include_str!("ram64.s"));
diff --git a/src/asm/note.s b/src/asm/note.s
new file mode 100644
index 00000000..7782a5c5
--- /dev/null
+++ b/src/asm/note.s
@@ -0,0 +1,20 @@
+.section .note, "a"
+
+# From xen/include/public/elfnote.h, "Physical entry point into the kernel."
+XEN_ELFNOTE_PHYS32_ENTRY = 18
+
+# We don't bother defining an ELFNOTE macro, as we only have one note.
+# This is equivalent to the kernel's:
+# ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY, .long ram32_start)
+.align 4
+    .long name_end - name_start # namesz
+    .long desc_end - desc_start # descsz
+    .long XEN_ELFNOTE_PHYS32_ENTRY # type
+name_start:
+    .asciz "Xen"
+name_end:
+.align 4
+desc_start:
+    .long ram32_start
+desc_end:
+.align 4
diff --git a/src/asm/ram32.s b/src/asm/ram32.s
new file mode 100644
index 00000000..e6177a42
--- /dev/null
+++ b/src/asm/ram32.s
@@ -0,0 +1,71 @@
+.section .text, "ax"
+.code32
+
+ram32_start:
+    # Indicate (via serial) that we are executing out of RAM
+    movw $0x3f8, %dx
+    movb $'R', %al
+    outb %al, %dx
+
+setup_page_tables:
+    # First PML2 entry identity maps [0, 2 MiB)
+    movl $0b10000011, (pml2) # huge (bit 7), writable (bit 1), present (bit 0)
+    # First PML3 entry points to PML2 table
+    movl $pml2, %eax
+    orb $0b00000011, %al # writable (bit 1), present (bit 0)
+    movl %eax, (pml3)
+    # First PML4 entry points to PML3 table
+    movl $pml3, %eax
+    orb $0b00000011, %al # writable (bit 1), present (bit 0)
+    movl %eax, (pml4)
+
+enable_paging:
+    # Load page table root into CR3
+    movl $pml4, %eax
+    movl %eax, %cr3
+
+    # Set CR4.PAE (Physical Address Extension)
+    movl %cr4, %eax
+    orb $0b00100000, %al # Set bit 5
+    movl %eax, %cr4
+    # Set EFER.LME (Long Mode Enable)
+    movl $0xC0000080, %ecx
+    rdmsr
+    orb $0b00000001, %ah # Set bit 8
+    wrmsr
+    # Set CR0.PG (Paging)
+    movl %cr0, %eax
+    orl $(1 << 31), %eax
+    movl %eax, %cr0
+
+    # Indicate (via serial) that we have enabled paging
+    movw $0x3f8, %dx
+    movb $'P', %al
+    outb %al, %dx
+
+jump_to_64bit:
+    # We are now in 32-bit compatibility mode. To enter 64-bit mode, we need to
+    # load a 64-bit code segment into our GDT.
+    lgdtl gdt64_ptr
+    # Set CS to a 64-bit segment and jump to 64-bit code.
+    ljmpl $(code64_desc - gdt64_start), $ram64_start
+
+gdt64_ptr:
+    .short gdt64_end - gdt64_start - 1 # GDT length is actually (length - 1)
+    .long gdt64_start
+gdt64_start:
+    # First descriptor is null
+    .quad 0
+code64_desc:
+    # For 64-bit code descriptors, all bits except the following are ignored:
+    # - CS.A=1 (bit 40) segment is accessed, prevents a write on first use.
+    # - CS.R=1 (bit 41) segment is readable. (this might not be necessary)
+    # - CS.C=1 (bit 42) segment is conforming. (this might not be necessary)
+    # - CS.E=1 (bit 43) required, we are an executable code segment.
+    # - CS.S=1 (bit 44) required, we are not a system segment.
+    # - CS.DPL=0 (bits 45/46) we are using this segment in Ring 0.
+    # - CS.P=1 (bit 47) required, the segment is present.
+    # - CS.L=1 (bit 53) required, we are a 64-bit (long mode) segment.
+    # - CS.D=0 (bit 54) required, CS.L=1 && CS.D=1 is reserved for future use.
+    .quad (1<<40) | (1<<41) | (1<<42) | (1<<43) | (1<<44) | (1<<47) | (1<<53)
+gdt64_end:
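
For readers unfamiliar with the raw directives in note.s: they hand-assemble a
standard ELF note that a PVH-aware loader (such as QEMU's -kernel loader, per
the commit message) finds in the PT_NOTE segment, reads the 32-bit entry point
from the descriptor, and jumps to with paging disabled, which is why ram32.s
begins in .code32. As a rough sketch only, assuming the usual ELF note layout
(the struct and field names below are invented for illustration and are not
part of this patch), the 20 bytes emitted by note.s could be described in Rust
as:

    // Illustrative sketch of the byte layout emitted by note.s.
    #[allow(dead_code)] // fields exist only to model the byte layout
    #[repr(C)]
    struct XenPhys32EntryNote {
        namesz: u32,   // 4: length of the name "Xen\0", including the NUL
        descsz: u32,   // 4: length of the descriptor (one 32-bit address)
        r#type: u32,   // 18: XEN_ELFNOTE_PHYS32_ENTRY
        name: [u8; 4], // "Xen\0"; already 4-byte aligned, so no extra padding
        desc: u32,     // physical address of ram32_start
    }

    fn main() {
        // 12-byte header + 4-byte name + 4-byte descriptor = 20 bytes total.
        assert_eq!(core::mem::size_of::<XenPhys32EntryNote>(), 20);
    }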