From 9710132b3e5108a6cc4dc738151865e80253df6f Mon Sep 17 00:00:00 2001
From: Paul Pan
Date: Sun, 7 Apr 2024 01:02:23 +0800
Subject: [PATCH] feat: allocator: block: implement reserve function

---
 kernel/src/vspace/allocator/block.rs    | 60 ++++++++++++++++++++++++-
 kernel/src/vspace/allocator/freelist.rs |  5 +--
 kernel/src/vspace/allocator/mod.rs      |  1 +
 3 files changed, 62 insertions(+), 4 deletions(-)

diff --git a/kernel/src/vspace/allocator/block.rs b/kernel/src/vspace/allocator/block.rs
index f122a34..a99393a 100644
--- a/kernel/src/vspace/allocator/block.rs
+++ b/kernel/src/vspace/allocator/block.rs
@@ -47,6 +47,10 @@ impl RamBlock {
     }
 
     fn insert(&mut self, start: PhysAddr, size: usize) -> Result<&mut Option<Block>, ()> {
+        if size == 0 {
+            return Err(());
+        }
+
         for block in self.blocks.iter_mut() {
             if block.is_none() {
                 *block = Some(Block { start, size });
@@ -102,12 +106,42 @@
 
         None
     }
+
+    pub fn reserve(&mut self, start: PhysAddr, size: usize) {
+        // NOTE: only support inclusive range
+        let victim = self
+            .blocks
+            .iter_mut()
+            .find(|block| block.is_some_and(|b| b.start_addr() <= start && start < b.end_addr()));
+
+        if victim.is_none() {
+            return;
+        }
+        let victim = victim.unwrap();
+
+        if let Some(block) = victim.take() {
+            let region_start = block.start_addr();
+            let region_end = block.end_addr();
+
+            let before_size = (start - region_start).as_usize();
+            let after_size = (region_end - (start + size)).as_usize();
+
+            // insert larger block first, then before block, then after block
+            if after_size > before_size {
+                // we could safely assert that before_size > 0 here
+                let _ = self.insert(start + size, after_size);
+                let _ = self.insert(region_start, before_size);
+            } else {
+                let _ = self.insert(region_start, before_size);
+                let _ = self.insert(start + size, after_size);
+            }
+        }
+    }
 }
 
 #[cfg(test)]
 mod tests {
     use super::*;
-    use log::debug;
 
     #[test_case]
     fn test_block() {
@@ -150,4 +184,28 @@ mod tests {
         let ptr = blk.alloc(Layout::from_size_align(1, 1).unwrap());
         assert_eq!(ptr, None);
     }
+
+    #[test_case]
+    fn test_block_reserve() {
+        let mut blk = RamBlock::<4>::new();
+        blk.dealloc(PhysAddr(0), 100);
+
+        blk.reserve(PhysAddr(0), 10);
+        let ptr = blk.alloc(Layout::from_size_align(10, 1).unwrap());
+        assert_eq!(ptr, Some(PhysAddr(10)));
+
+        blk.reserve(PhysAddr(90), 10);
+        let ptr = blk.alloc(Layout::from_size_align(70, 1).unwrap());
+        assert_eq!(ptr, Some(PhysAddr(0x14)));
+
+        blk.dealloc(PhysAddr(0), 30);
+        blk.reserve(PhysAddr(10), 10);
+        let ptr = blk.alloc(Layout::from_size_align(10, 1).unwrap());
+        assert_eq!(ptr, Some(PhysAddr(0)));
+        let ptr = blk.alloc(Layout::from_size_align(10, 1).unwrap());
+        assert_eq!(ptr, Some(PhysAddr(20)));
+
+        let ptr = blk.alloc(Layout::from_size_align(1, 1).unwrap());
+        assert_eq!(ptr, None);
+    }
 }
diff --git a/kernel/src/vspace/allocator/freelist.rs b/kernel/src/vspace/allocator/freelist.rs
index 76d52c7..e1fe5e4 100644
--- a/kernel/src/vspace/allocator/freelist.rs
+++ b/kernel/src/vspace/allocator/freelist.rs
@@ -116,10 +116,9 @@ impl FreeList {
     }
 
     pub fn reserve(&mut self, start: PhysAddr, size: usize) {
+        // NOTE: only support inclusive range
         if let Some((region, _)) = self.alloc_node(|region| {
-            let left = max(region.start_addr(), start);
-            let right = min(region.end_addr(), start + size);
-            (left < right).some(|| (), ())
+            (region.start_addr() <= start && start < region.end_addr()).some(|| (), ())
         }) {
             let region_start = region.start_addr();
             let region_end = region.end_addr();
diff --git a/kernel/src/vspace/allocator/mod.rs b/kernel/src/vspace/allocator/mod.rs
index 64ca50a..249b991 100644
--- a/kernel/src/vspace/allocator/mod.rs
+++ b/kernel/src/vspace/allocator/mod.rs
@@ -3,4 +3,5 @@ mod block;
 mod freelist;
 
 pub use bitmap::*;
+pub use block::*;
 pub use freelist::*;