diff --git a/Cargo.lock b/Cargo.lock index c8107ff6a5..ae93662c62 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -92,6 +92,12 @@ version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +[[package]] +name = "call-once" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a57a50948117a233b27f9bf73ab74709ab90d245216c4707cc16eea067a50bb" + [[package]] name = "cc" version = "1.0.82" @@ -312,7 +318,9 @@ dependencies = [ "qemu-exit", "rand_chacha", "shell-words", + "smallvec", "smoltcp", + "take-static", "talc", "time", "tock-registers", @@ -776,6 +784,12 @@ version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" +[[package]] +name = "smallvec" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" + [[package]] name = "smoltcp" version = "0.10.0" @@ -826,6 +840,15 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "take-static" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87a2fd121b45f8363ac84f579b8246ef518b04a6e545a8c09b9e98b78d32bde" +dependencies = [ + "call-once", +] + [[package]] name = "talc" version = "2.2.2" diff --git a/Cargo.toml b/Cargo.toml index c45a90d589..e2e91947d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -84,6 +84,8 @@ pflock = "0.2" qemu-exit = "3.0" rand_chacha = { version = "0.3", default-features = false } shell-words = { version = "1.1", default-features = false } +smallvec = { version = "1", features = ["const_new"] } +take-static = "0.1" talc = { version = "2" } time = { version = "0.3", default-features = false } zerocopy = { version = "0.7", features = ["derive"] } diff --git a/src/arch/aarch64/kernel/core_local.rs 
b/src/arch/aarch64/kernel/core_local.rs index b41a6a24f3..71c917d9f5 100644 --- a/src/arch/aarch64/kernel/core_local.rs +++ b/src/arch/aarch64/kernel/core_local.rs @@ -26,8 +26,12 @@ impl CoreLocal { pub fn install() { let core_id = CPU_ONLINE.load(Ordering::Relaxed); - let irq_statistics = &*Box::leak(Box::new(IrqStatistics::new())); - IRQ_COUNTERS.lock().insert(core_id, irq_statistics); + let irq_statistics = if core_id == 0 { + static FIRST_IRQ_STATISTICS: IrqStatistics = IrqStatistics::new(); + &FIRST_IRQ_STATISTICS + } else { + &*Box::leak(Box::new(IrqStatistics::new())) + }; let this = Self { this: ptr::null_mut(), @@ -36,7 +40,15 @@ impl CoreLocal { irq_statistics, async_tasks: RefCell::new(Vec::new()), }; - let this = Box::leak(Box::new(this)); + let this = if core_id == 0 { + take_static::take_static! { + static FIRST_CORE_LOCAL: Option<CoreLocal> = None; + } + FIRST_CORE_LOCAL.take().unwrap().insert(this) + } else { + this.add_irq_counter(); + Box::leak(Box::new(this)) + }; this.this = &*this; unsafe { @@ -52,6 +64,12 @@ impl CoreLocal { &*raw } } + + pub fn add_irq_counter(&self) { + IRQ_COUNTERS + .lock() + .insert(self.core_id, self.irq_statistics); + } } #[inline] diff --git a/src/arch/aarch64/kernel/mod.rs b/src/arch/aarch64/kernel/mod.rs index 19ecf8629f..2de414e95b 100644 --- a/src/arch/aarch64/kernel/mod.rs +++ b/src/arch/aarch64/kernel/mod.rs @@ -195,6 +195,7 @@ pub fn boot_processor_init() { crate::mm::init(); crate::mm::print_information(); + CoreLocal::get().add_irq_counter(); env::init(); interrupts::init(); interrupts::enable(); diff --git a/src/arch/aarch64/mm/physicalmem.rs b/src/arch/aarch64/mm/physicalmem.rs index 8aead1eb71..73f6a090a8 100644 --- a/src/arch/aarch64/mm/physicalmem.rs +++ b/src/arch/aarch64/mm/physicalmem.rs @@ -27,7 +27,7 @@ fn detect_from_limits() -> Result<(), ()> { limit - mm::kernel_end_address().as_usize(), Ordering::SeqCst, ); - PHYSICAL_FREE_LIST.lock().list.push_back(entry); + PHYSICAL_FREE_LIST.lock().push(entry); Ok(()) }
diff --git a/src/arch/aarch64/mm/virtualmem.rs b/src/arch/aarch64/mm/virtualmem.rs index f69448b005..77bce6be8b 100644 --- a/src/arch/aarch64/mm/virtualmem.rs +++ b/src/arch/aarch64/mm/virtualmem.rs @@ -19,7 +19,7 @@ pub fn init() { start: mm::kernel_end_address().as_usize(), end: KERNEL_VIRTUAL_MEMORY_END.as_usize(), }; - KERNEL_FREE_LIST.lock().list.push_back(entry); + KERNEL_FREE_LIST.lock().push(entry); } pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> { diff --git a/src/arch/x86_64/kernel/core_local.rs b/src/arch/x86_64/kernel/core_local.rs index 5aec27b29d..323b396a48 100644 --- a/src/arch/x86_64/kernel/core_local.rs +++ b/src/arch/x86_64/kernel/core_local.rs @@ -37,8 +37,12 @@ impl CoreLocal { let core_id = CPU_ONLINE.load(Ordering::Relaxed); - let irq_statistics = &*Box::leak(Box::new(IrqStatistics::new())); - IRQ_COUNTERS.lock().insert(core_id, irq_statistics); + let irq_statistics = if core_id == 0 { + static FIRST_IRQ_STATISTICS: IrqStatistics = IrqStatistics::new(); + &FIRST_IRQ_STATISTICS + } else { + &*Box::leak(Box::new(IrqStatistics::new())) + }; let this = Self { this: ptr::null_mut(), @@ -49,7 +53,15 @@ impl CoreLocal { irq_statistics, async_tasks: RefCell::new(Vec::new()), }; - let this = Box::leak(Box::new(this)); + let this = if core_id == 0 { + take_static::take_static!
{ + static FIRST_CORE_LOCAL: Option<CoreLocal> = None; + } + FIRST_CORE_LOCAL.take().unwrap().insert(this) + } else { + this.add_irq_counter(); + Box::leak(Box::new(this)) + }; this.this = &*this; GsBase::write(VirtAddr::from_ptr(this)); @@ -64,6 +76,12 @@ impl CoreLocal { &*raw } } + + pub fn add_irq_counter(&self) { + IRQ_COUNTERS + .lock() + .insert(self.core_id, self.irq_statistics); + } } pub(crate) fn core_id() -> CoreId { diff --git a/src/arch/x86_64/kernel/mod.rs b/src/arch/x86_64/kernel/mod.rs index c9c0e7900d..a8afad3ec3 100644 --- a/src/arch/x86_64/kernel/mod.rs +++ b/src/arch/x86_64/kernel/mod.rs @@ -213,6 +213,7 @@ pub fn boot_processor_init() { crate::mm::init(); crate::mm::print_information(); + CoreLocal::get().add_irq_counter(); env::init(); gdt::add_current_core(); interrupts::load_idt(); diff --git a/src/arch/x86_64/mm/physicalmem.rs b/src/arch/x86_64/mm/physicalmem.rs index 8aac7a0347..1b4ef97725 100644 --- a/src/arch/x86_64/mm/physicalmem.rs +++ b/src/arch/x86_64/mm/physicalmem.rs @@ -53,7 +53,7 @@ fn detect_from_multiboot_info() -> Result<(), ()> { (m.base_address() + m.length() - start_address.as_u64()) as usize, Ordering::SeqCst, ); - PHYSICAL_FREE_LIST.lock().list.push_back(entry); + PHYSICAL_FREE_LIST.lock().push(entry); } assert!( @@ -73,17 +73,17 @@ fn detect_from_limits() -> Result<(), ()> { // add gap for the APIC if limit > KVM_32BIT_GAP_START { let entry = FreeListEntry::new(mm::kernel_end_address().as_usize(), KVM_32BIT_GAP_START); - PHYSICAL_FREE_LIST.lock().list.push_back(entry); + PHYSICAL_FREE_LIST.lock().push(entry); if limit > KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE { let entry = FreeListEntry::new(KVM_32BIT_GAP_START + KVM_32BIT_GAP_SIZE, limit); - PHYSICAL_FREE_LIST.lock().list.push_back(entry); + PHYSICAL_FREE_LIST.lock().push(entry); TOTAL_MEMORY.store(limit - KVM_32BIT_GAP_SIZE, Ordering::SeqCst); } else { TOTAL_MEMORY.store(KVM_32BIT_GAP_START, Ordering::SeqCst); } } else { let entry =
FreeListEntry::new(mm::kernel_end_address().as_usize(), limit); - PHYSICAL_FREE_LIST.lock().list.push_back(entry); + PHYSICAL_FREE_LIST.lock().push(entry); TOTAL_MEMORY.store(limit, Ordering::SeqCst); } diff --git a/src/arch/x86_64/mm/virtualmem.rs b/src/arch/x86_64/mm/virtualmem.rs index bd2a6dff2b..2645d7bd43 100644 --- a/src/arch/x86_64/mm/virtualmem.rs +++ b/src/arch/x86_64/mm/virtualmem.rs @@ -15,7 +15,7 @@ pub fn init() { mm::kernel_end_address().as_usize(), kernel_heap_end().as_usize(), ); - KERNEL_FREE_LIST.lock().list.push_back(entry); + KERNEL_FREE_LIST.lock().push(entry); } pub fn allocate(size: usize) -> Result<VirtAddr, AllocError> { diff --git a/src/lib.rs b/src/lib.rs index 55acdf62c5..23fd6ecb4b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -299,13 +299,6 @@ fn synch_all_cores() { /// Entry Point of HermitCore for the Boot Processor #[cfg(target_os = "none")] fn boot_processor_main() -> ! { - let init_heap_start = env::get_base_address() + env::get_image_size(); - let init_heap_len = - init_heap_start.align_up_to_large_page().as_usize() - init_heap_start.as_usize(); - unsafe { - ALLOCATOR.init(init_heap_start.as_mut_ptr(), init_heap_len); - } - // Initialize the kernel and hardware. arch::message_output_init(); unsafe { @@ -326,11 +319,6 @@ fn boot_processor_main() -> !
{ env::get_tls_start(), env::get_tls_memsz() ); - info!( - "Init heap: [0x{:p} - 0x{:p}]", - init_heap_start, - init_heap_start + init_heap_len - ); arch::boot_processor_init(); scheduler::add_current_core(); diff --git a/src/mm/allocator.rs b/src/mm/allocator.rs index db9ff43f3d..7b2498d79b 100644 --- a/src/mm/allocator.rs +++ b/src/mm/allocator.rs @@ -31,13 +31,6 @@ impl LockedAllocator { self.0.talc().init(arena); } } - - pub unsafe fn extend(&self, heap_bottom: *mut u8, heap_size: usize) { - let arena = Span::from_base_size(heap_bottom, heap_size); - unsafe { - self.0.talc().extend(arena); - } - } } /// To avoid false sharing, the global memory allocator align diff --git a/src/mm/freelist.rs b/src/mm/freelist.rs index 136d49182e..3102754589 100644 --- a/src/mm/freelist.rs +++ b/src/mm/freelist.rs @@ -1,8 +1,8 @@ -use alloc::collections::linked_list::LinkedList; use core::alloc::AllocError; use core::cmp::Ordering; use align_address::Align; +use smallvec::SmallVec; #[derive(Debug)] pub struct FreeListEntry { @@ -18,16 +18,20 @@ impl FreeListEntry { #[derive(Debug)] pub struct FreeList { - pub list: LinkedList<FreeListEntry>, + list: SmallVec<[FreeListEntry; 16]>, } impl FreeList { pub const fn new() -> Self { Self { - list: LinkedList::new(), + list: SmallVec::new_const(), } } + pub fn push(&mut self, entry: FreeListEntry) { + self.list.push(entry); + } + pub fn allocate(&mut self, size: usize, alignment: Option<usize>) -> Result<usize, AllocError> { trace!( "Allocating {} bytes from Free List {:#X}", @@ -42,8 +46,7 @@ }; // Find a region in the Free List that has at least the requested size.
- let mut cursor = self.list.cursor_front_mut(); - while let Some(node) = cursor.current() { + for (i, node) in self.list.iter_mut().enumerate() { let (region_start, region_size) = (node.start, node.end - node.start); match region_size.cmp(&new_size) { @@ -55,7 +58,7 @@ impl FreeList { node.start += size + (new_addr - region_start); if new_addr != region_start { let new_entry = FreeListEntry::new(region_start, new_addr); - cursor.insert_before(new_entry); + self.list.insert(i, new_entry); } return Ok(new_addr); } else { @@ -73,14 +76,12 @@ impl FreeList { } return Ok(new_addr); } else { - cursor.remove_current(); + self.list.remove(i); return Ok(region_start); } } Ordering::Less => {} } - - cursor.move_next(); } Err(AllocError) @@ -96,14 +97,13 @@ impl FreeList { ); // Find a region in the Free List that has at least the requested size. - let mut cursor = self.list.cursor_front_mut(); - while let Some(node) = cursor.current() { + for (i, node) in self.list.iter_mut().enumerate() { let (region_start, region_size) = (node.start, node.end - node.start); if address > region_start && address + size < region_start + region_size { node.start = address + size; let new_entry = FreeListEntry::new(region_start, address); - cursor.insert_before(new_entry); + self.list.insert(i, new_entry); return Ok(()); } else if address > region_start && address + size == region_start + region_size { node.start = address + size; @@ -112,8 +112,6 @@ impl FreeList { node.start = region_start + size; return Ok(()); } - - cursor.move_next(); } Err(AllocError) @@ -128,9 +126,8 @@ impl FreeList { ); let end = address + size; - let mut cursor = self.list.cursor_front_mut(); - while let Some(node) = cursor.current() { + for (i, node) in self.list.iter_mut().enumerate() { let (region_start, region_end) = (node.start, node.end); if region_start == end { @@ -138,14 +135,16 @@ impl FreeList { node.start = address; // Check if it can even reunite with the previous region. 
- if let Some(prev_node) = cursor.peek_prev() { - let prev_region_end = prev_node.end; - - if prev_region_end == address { - // It can reunite, so let the current region span over the reunited region and move the duplicate node - // into the pool for deletion or reuse. - prev_node.end = region_end; - cursor.remove_current(); + if i > 0 { + if let Some(prev_node) = self.list.get_mut(i - 1) { + let prev_region_end = prev_node.end; + + if prev_region_end == address { + // It can reunite, so let the current region span over the reunited region and move the duplicate node + // into the pool for deletion or reuse. + prev_node.end = region_end; + self.list.remove(i); + } } } @@ -154,14 +153,14 @@ impl FreeList { node.end = end; // Check if it can even reunite with the next region. - if let Some(next_node) = cursor.peek_next() { + if let Some(next_node) = self.list.get_mut(i + 1) { let next_region_start = next_node.start; if next_region_start == end { // It can reunite, so let the current region span over the reunited region and move the duplicate node // into the pool for deletion or reuse. next_node.start = region_start; - cursor.remove_current(); + self.list.remove(i); } } @@ -172,17 +171,15 @@ impl FreeList { // We search the list from low to high addresses and insert us before the first entry that has a // higher address than us. let new_entry = FreeListEntry::new(address, end); - cursor.insert_before(new_entry); + self.list.insert(i, new_entry); return; } - - cursor.move_next(); } // We could not find an entry with a higher address than us. // So we become the new last entry in the list. Get that entry from the node pool. 
let new_element = FreeListEntry::new(address, end); - self.list.push_back(new_element); + self.push(new_element); } pub fn print_information(&self, header: &str) { @@ -200,53 +197,25 @@ impl FreeList { mod tests { use super::*; - #[test] - fn add_element() { - let mut freelist = FreeList::new(); - let entry = FreeListEntry::new(0x10000, 0x100000); - - freelist.list.push_back(entry); - - let mut cursor = freelist.list.cursor_front_mut(); - - while let Some(node) = cursor.peek_next() { - assert!(node.start != 0x1000); - assert!(node.end != 0x10000); - - cursor.move_next(); - } - } - #[test] fn allocate() { let mut freelist = FreeList::new(); let entry = FreeListEntry::new(0x10000, 0x100000); - freelist.list.push_back(entry); + freelist.push(entry); let addr = freelist.allocate(0x1000, None); assert_eq!(addr.unwrap(), 0x10000); - let mut cursor = freelist.list.cursor_front_mut(); - while let Some(node) = cursor.current() { + for node in &freelist.list { assert_eq!(node.start, 0x11000); assert_eq!(node.end, 0x100000); - - cursor.move_next(); } let addr = freelist.allocate(0x1000, Some(0x2000)); - let mut cursor = freelist.list.cursor_front_mut(); - assert!(cursor.current().is_some()); - if let Some(node) = cursor.current() { - assert_eq!(node.start, 0x11000); - } - - cursor.move_next(); - assert!(cursor.current().is_some()); - if let Some(node) = cursor.current() { - assert_eq!(node.start, 0x13000); - } + let mut iter = freelist.list.iter(); + assert_eq!(iter.next().unwrap().start, 0x11000); + assert_eq!(iter.next().unwrap().start, 0x13000); } #[test] @@ -254,16 +223,13 @@ mod tests { let mut freelist = FreeList::new(); let entry = FreeListEntry::new(0x10000, 0x100000); - freelist.list.push_back(entry); + freelist.push(entry); let addr = freelist.allocate(0x1000, None); freelist.deallocate(addr.unwrap(), 0x1000); - let mut cursor = freelist.list.cursor_front_mut(); - while let Some(node) = cursor.current() { + for node in &freelist.list { assert_eq!(node.start, 
0x10000); assert_eq!(node.end, 0x100000); - - cursor.move_next(); } } } diff --git a/src/mm/mod.rs b/src/mm/mod.rs index 3ecb05c41e..d68750736a 100644 --- a/src/mm/mod.rs +++ b/src/mm/mod.rs @@ -115,7 +115,7 @@ pub(crate) fn init() { unsafe { let start = allocate(kernel_heap_size, true); - crate::ALLOCATOR.extend(start.as_mut_ptr(), kernel_heap_size); + crate::ALLOCATOR.init(start.as_mut_ptr(), kernel_heap_size); info!("Kernel heap starts at {:#x}", start); } @@ -223,21 +223,16 @@ pub(crate) fn init() { } let heap_end_addr = map_addr; - let init_heap_start_addr = env::get_base_address() + env::get_image_size(); #[cfg(not(feature = "newlib"))] unsafe { - crate::ALLOCATOR.extend( - init_heap_start_addr.as_mut_ptr(), - (heap_end_addr - init_heap_start_addr).into(), + crate::ALLOCATOR.init( + heap_start_addr.as_mut_ptr(), + (heap_end_addr - heap_start_addr).into(), ); } - info!( - "Heap extension from {:#x} to {:#x}", - heap_start_addr, heap_end_addr - ); - let heap_addr_range = init_heap_start_addr..heap_end_addr; + let heap_addr_range = heap_start_addr..heap_end_addr; info!("Heap is located at {heap_addr_range:#x?} ({map_size} Bytes unmapped)"); #[cfg(feature = "newlib")] HEAP_ADDR_RANGE.set(heap_addr_range).unwrap();