From 7d9e7282c9a77e0b6b8910c5326e101da0c60e91 Mon Sep 17 00:00:00 2001
From: Taiki Yamaguchi
Date: Thu, 16 Feb 2023 22:48:08 +0800
Subject: [PATCH] Address alignment

---
 doc/protected_mode_development_2.md | 15 +++++++++++++++
 src/boot/boot.asm                   |  4 ++--
 src/kernel.asm                      |  6 +++++-
 src/linker.ld                       | 13 +++++++++----
 4 files changed, 31 insertions(+), 7 deletions(-)

diff --git a/doc/protected_mode_development_2.md b/doc/protected_mode_development_2.md
index 0c3a28d..fd3489f 100644
--- a/doc/protected_mode_development_2.md
+++ b/doc/protected_mode_development_2.md
@@ -97,3 +97,18 @@ Breakpoint 1, _start () at ./src/kernel.asm:9
 ```
 
 https://asciinema.org/a/n3qfuN4AV8u3GWKHAqFxtGR15
+
+## 3. Memory alignment
+
+A quick summary of how our code works with memory:
+
+- The bootloader is loaded at address 0x7c00 and must be exactly 512 bytes (`boot.asm`).
+- Before entering Protected Mode, we define a GDT that describes the memory segments `CODE_SEG` and `DATA_SEG`.
+- In `boot.asm`, we read our kernel from disk into address 0x100000 and then far-jump to `CODE_SEG:0x100000`.
+- The kernel entry code is written in `kernel.asm`, which is assembled into an ELF object (`kernel.asm.o` in the Makefile). All other kernel code (not written yet, but coming later) will be linked with it into `kernelfull.o` and then built into `kernel.bin`.
+- The C compiler aligns stack frames, data, etc. to multiples of 4 bytes, because memory accesses on 32-bit processors are much faster when aligned. If the contents of an object file are misaligned, unexpected errors can occur. Our `kernel.asm` is not a C program, so it is not aligned by default.
+- To properly align `kernelfull.o`, we:
+  - Add a padding directive at the end of `kernel.asm` so that it becomes exactly one sector (512 bytes) long.
+    - Note that padding `boot.asm` to 512 bytes is unrelated to the memory alignment issue discussed here; the bootloader simply must carry the boot signature 0x55AA in bytes 511 and 512 of its sector.
+  - Make sure that `kernel.asm.o` is the first object file passed to the linker. That places it at the start of the `.text` section ([`linker.ld`](../src/linker.ld)), so it always begins at 0x100000. Since `kernel.asm.o` contributes exactly 512 bytes, any C object files linked after it are automatically aligned.
+  - In other kernel assembly files, put the code in the `.asm` section so that it is linked at the end of the output. If that assembled code is not a multiple of 4 bytes, that is fine, because nothing is placed after it.
diff --git a/src/boot/boot.asm b/src/boot/boot.asm
index 2135e63..b83748b 100644
--- a/src/boot/boot.asm
+++ b/src/boot/boot.asm
@@ -62,9 +62,9 @@ gdt_descriptor:
 load32:
     mov eax, 1          ; Beginning of the sector to read from (Kernel starts at the first sector. 0 = bootloader)
     mov ecx, 100        ; End of the sector (we added 100 sectors in Makefile)
-    mov edi, 0x0100000  ; Address where the Kernel will be loaded into (linker.ld specifies 1M)
+    mov edi, 0x0100000  ; Address where the Kernel will be loaded into (linker.ld also specifies 1M)
     call ata_lba_read
-    jmp CODE_SEG:0x0100000
+    jmp CODE_SEG:0x0100000 ; Jump to our kernel code
 
 ata_lba_read:
     mov ebx, eax ; Backup the LBA (Linear Block Address), which is set to 1
diff --git a/src/kernel.asm b/src/kernel.asm
index 8720c75..6a24986 100644
--- a/src/kernel.asm
+++ b/src/kernel.asm
@@ -22,4 +22,8 @@ _start:
 
     cld ; Clears direction flag
     cli ; Disables interrupts
-    hlt ; This hangs the computer
\ No newline at end of file
+    hlt ; This hangs the computer
+
+times 512 - ($ - $$) db 0 ; Pad the kernel code sector to 512 bytes
+                          ; This ensures that any object files written in C and linked with this assembly
+                          ; will be correctly aligned.
diff --git a/src/linker.ld b/src/linker.ld
index c0778ce..f3d5fce 100644
--- a/src/linker.ld
+++ b/src/linker.ld
@@ -3,24 +3,29 @@ OUTPUT_FORMAT(binary)
 SECTIONS
 {
     . = 1M;
-    .text :
+    .text : ALIGN(4096)
     {
         *(.text)
     }
 
-    .rodata :
+    .rodata : ALIGN(4096)
     {
         *(.rodata)
     }
 
-    .data :
+    .data : ALIGN(4096)
     {
         *(.data)
     }
 
-    .bss :
+    .bss : ALIGN(4096)
     {
         *(COMMON)
         *(.bss)
     }
+
+    .asm : ALIGN(4096)
+    {
+        *(.asm)
+    }
 }
\ No newline at end of file
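
The last bullet of the new documentation section says that future kernel assembly files should place their code in a `.asm` section so that the linker script's new `.asm` output section collects them after the 4-byte-aligned C objects. No such file exists yet in this patch; the NASM sketch below is a hypothetical example (the file name and the `idt_load` symbol are illustrative, not part of the repository):

```
; Hypothetical future file, e.g. an IDT helper -- not part of this patch.
; Placing the code in the custom ".asm" section lets linker.ld gather it into
; the trailing ".asm" output section, after all aligned C object code.
section .asm

global idt_load             ; illustrative symbol, callable from C

idt_load:
    push ebp                ; standard 32-bit cdecl prologue
    mov ebp, esp
    mov ebx, [ebp + 8]      ; first argument: pointer to an IDT descriptor
    lidt [ebx]              ; load the Interrupt Descriptor Table
    pop ebp
    ret
```

Because this code lands in the final `.asm` output section, its size does not need to be a multiple of 4 bytes; nothing is linked after it.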
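For contrast with the `times 512 - ($ - $$) db 0` padding added to `kernel.asm`, the 512-byte padding in `boot.asm` (not shown in this patch) exists only so that the BIOS finds the 0x55AA boot signature in the last two bytes of the boot sector. The standard idiom looks roughly like this (an illustrative sketch, not copied from the repository's `boot.asm`):

```
; Typical end of a boot sector (illustrative, not taken verbatim from boot.asm).
times 510 - ($ - $$) db 0   ; pad the sector so the next word lands at offset 510
dw 0xAA55                   ; boot signature, stored little-endian as bytes 0x55, 0xAA
```

Both files end up 512 bytes long, but only the kernel padding is about keeping the C objects linked after `kernel.asm.o` aligned.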