Compare commits

...

2 Commits

Author SHA1 Message Date
2aa88154b4 feat: allow to reserve space in freelist 2024-04-06 20:40:38 +08:00
bca4e37ef2 feat: add allocator 2024-04-04 19:28:21 +08:00
5 changed files with 516 additions and 0 deletions

View File

@ -3,6 +3,7 @@
#![no_main]
// Features
#![feature(asm_const)]
#![feature(const_mut_refs)]
#![feature(extern_types)]
#![feature(naked_functions)]
#![feature(panic_info_message)]

View File

@ -0,0 +1,286 @@
use crate::arch::layout::PAGE_SIZE;
use crate::vspace::addr::{AddressOps, PhysAddr};
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use log::warn;
use spin::Mutex;
trait BitmapCfg: Copy + Clone {
const CAPACITY: usize;
const DEFAULT: Self;
fn alloc_bits(&mut self) -> Option<usize>;
fn dealloc_bits(&mut self, index: usize);
}
#[derive(Copy, Clone)]
struct Bitmap32(u32);
impl BitmapCfg for Bitmap32 {
const CAPACITY: usize = u32::BITS as usize;
const DEFAULT: Self = Self(0);
fn alloc_bits(&mut self) -> Option<usize> {
// fast-path
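// leading_zeros() counts the run of free (zero) bits starting at the MSB;
// if that run is non-empty, bit CAPACITY - i is its lowest member and can be
// taken without scanning, which keeps sequential allocation O(1).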
let i = self.0.leading_zeros() as usize;
if i > 0 {
self.0 |= 1u32 << (Self::CAPACITY - i);
return Some(Self::CAPACITY - i);
}
// check full
if self.0 == u32::MAX {
return None;
}
// slow-path
for i in 0..Self::CAPACITY {
if self.0 & (1 << i) == 0 {
self.0 |= 1 << i;
return Some(i);
}
}
None
}
fn dealloc_bits(&mut self, index: usize) {
if index < Self::CAPACITY {
self.0 &= !(1 << index);
}
}
}
const BITS_PER_LEVEL: usize = 32;
#[derive(Copy, Clone)]
struct Bitmap<B: BitmapCfg> {
bits: u32, // bit i is set when next[i] is full; u32 must provide at least BITS_PER_LEVEL bits
next: [B; BITS_PER_LEVEL],
}
const_assert!(core::mem::size_of::<u32>() * 8 >= BITS_PER_LEVEL);
impl<B: BitmapCfg> BitmapCfg for Bitmap<B> {
const CAPACITY: usize = BITS_PER_LEVEL * B::CAPACITY;
const DEFAULT: Self = Self {
bits: 0,
next: [B::DEFAULT; BITS_PER_LEVEL],
};
fn alloc_bits(&mut self) -> Option<usize> {
if self.bits == u32::MAX {
return None;
}
// fast-path
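// `bits` has a 1 for every sub-bitmap already marked full, so the run of zeros
// at the MSB end points at sub-bitmaps not yet marked full. Probe the lowest of
// them; if it turns out to be full, alloc_index records that in `bits` and the
// loop retries with the next one.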
loop {
let i = self.bits.leading_zeros() as usize;
if i == 0 {
break;
}
if let Some(index) = self.alloc_index(BITS_PER_LEVEL - i) {
return Some(index);
}
}
// slow-path
for i in 0..BITS_PER_LEVEL {
if self.bits & (1 << i) == 0 {
if let Some(index) = self.alloc_index(i) {
return Some(index);
}
}
}
None
}
fn dealloc_bits(&mut self, index: usize) {
let i = index / B::CAPACITY;
if i < BITS_PER_LEVEL {
self.next[i].dealloc_bits(index % B::CAPACITY);
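// the sub-bitmap now has at least one free bit again, so clear its "full" flag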
self.bits &= !(1 << i);
}
}
}
impl<B: BitmapCfg> Bitmap<B> {
fn alloc_index(&mut self, i: usize) -> Option<usize> {
if let Some(sub) = self.next[i].alloc_bits() {
return Some(i * B::CAPACITY + sub);
}
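// the sub-bitmap is exhausted; record that so later probes skip it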
self.bits |= 1 << i;
None
}
}
// 1K pages: the bitmap occupies (32 + 1) * 32 / 8 = 132 bytes and tracks 1K * 4 KiB = 4 MiB of memory
type Bitmap1K = Bitmap<Bitmap32>;
// 32K pages: the bitmap occupies (32 * (32 + 1) + 1) * 32 / 8 = 4228 bytes and tracks 32K * 4 KiB = 128 MiB of memory
type Bitmap32K = Bitmap<Bitmap1K>;
const_assert_eq!(core::mem::size_of::<Bitmap1K>(), (32 + 1) * 32 / 8);
const_assert_eq!(
core::mem::size_of::<Bitmap32K>(),
(32 * (32 + 1) + 1) * 32 / 8
);
struct BitmapAllocator<B: BitmapCfg = Bitmap32K> {
bitmap: Mutex<B>,
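// physical address of page 0; bit i of the bitmap maps to the page at base + i * PAGE_SIZE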
base: PhysAddr,
}
unsafe impl<B: BitmapCfg> GlobalAlloc for BitmapAllocator<B> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.size() > PAGE_SIZE || layout.align() > PAGE_SIZE {
warn!("unsupported layout: {:?}, page size: {}", layout, PAGE_SIZE);
return null_mut();
}
let bit = self.bitmap.lock().alloc_bits();
if let Some(index) = bit {
let addr = self.base + index * PAGE_SIZE;
return addr.as_mut_ptr();
}
null_mut()
}
unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) {
let bit = (PhysAddr::from(ptr) - self.base).as_usize() / PAGE_SIZE;
self.bitmap.lock().dealloc_bits(bit);
}
}
impl<B: BitmapCfg> BitmapAllocator<B> {
pub fn new(base: PhysAddr) -> Self {
Self {
bitmap: Mutex::new(B::DEFAULT),
base,
}
}
}
pub type BitmapAllocator32K = BitmapAllocator<Bitmap32K>;
#[cfg(test)]
mod tests {
use super::*;
#[test_case]
fn test_bitmap32() {
let mut bitmap = Bitmap32::DEFAULT;
// alloc from empty
for i in 0..32 {
assert_eq!(bitmap.alloc_bits(), Some(i));
}
assert_eq!(bitmap.alloc_bits(), None);
// dealloc
for i in 0..16 {
bitmap.dealloc_bits(i * 2);
}
// predictable alloc from dealloc pattern
for i in 0..16 {
assert_eq!(bitmap.alloc_bits(), Some(i * 2));
}
// invalid dealloc
assert_eq!(bitmap.alloc_bits(), None);
bitmap.dealloc_bits(32);
assert_eq!(bitmap.alloc_bits(), None);
}
#[test_case]
fn test_bitmap1k() {
let mut bitmap = Bitmap1K::DEFAULT;
// alloc from empty
for i in 0..(32 * BITS_PER_LEVEL) {
assert_eq!(bitmap.alloc_bits(), Some(i));
}
// free the second slot
for i in 0..32 {
bitmap.dealloc_bits(32 + i);
}
// alloc again
for i in 0..32 {
assert_eq!(bitmap.alloc_bits(), Some(32 + i));
}
// invalid dealloc
assert_eq!(bitmap.alloc_bits(), None);
bitmap.dealloc_bits(32 * BITS_PER_LEVEL);
assert_eq!(bitmap.alloc_bits(), None);
}
#[test_case]
fn test_bitmap32k() {
let mut bitmap = Bitmap32K::DEFAULT;
// alloc from empty
for i in 0..(32 * BITS_PER_LEVEL * BITS_PER_LEVEL) {
assert_eq!(bitmap.alloc_bits(), Some(i));
}
// free the second slot
for i in 0..32 {
bitmap.dealloc_bits(32 + i);
}
// free the whole third Bitmap1K block
for i in 0..(32 * BITS_PER_LEVEL) {
bitmap.dealloc_bits(32 * BITS_PER_LEVEL * 2 + i);
}
// alloc again (second slot)
for i in 0..32 {
assert_eq!(bitmap.alloc_bits(), Some(32 + i));
}
// alloc again (third Bitmap1K block)
for i in 0..(32 * BITS_PER_LEVEL) {
assert_eq!(bitmap.alloc_bits(), Some(32 * BITS_PER_LEVEL * 2 + i));
}
// invalid dealloc
assert_eq!(bitmap.alloc_bits(), None);
bitmap.dealloc_bits(32 * BITS_PER_LEVEL * BITS_PER_LEVEL);
assert_eq!(bitmap.alloc_bits(), None);
}
#[test_case]
fn test_bitmap_allocator() {
let allocator = BitmapAllocator32K::new(PhysAddr(0x42));
// alloc from empty
for i in 0..32 {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
assert_eq!(PhysAddr::from(ptr).as_usize(), 0x42 + i * PAGE_SIZE);
}
// dealloc
for i in 0..16 {
unsafe {
allocator.dealloc(
PhysAddr(0x42 + i * 2 * PAGE_SIZE).as_mut_ptr(),
Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap(),
);
}
}
// predictable alloc from dealloc pattern
for i in 0..16 {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
assert_eq!(PhysAddr::from(ptr).as_usize(), 0x42 + i * 2 * PAGE_SIZE);
}
}
}

View File

@ -0,0 +1,223 @@
// adapted from https://os.phil-opp.com/allocator-designs/#linked-list-allocator
use crate::utils::then::Then;
use crate::vspace::addr::{AddressOps, PhysAddr};
use core::alloc::{GlobalAlloc, Layout};
use spin::Mutex;
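// A free-region header stored in-place at the start of the region it describes;
// `size` is the total length of the region in bytes.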
struct ListNode {
size: usize,
next: Option<&'static mut ListNode>,
}
impl ListNode {
const fn new(size: usize) -> Self {
Self { size, next: None }
}
fn start_addr(&self) -> PhysAddr {
PhysAddr(self as *const _ as usize)
}
fn end_addr(&self) -> PhysAddr {
self.start_addr() + self.size
}
fn fit(&self, size: usize, align: usize) -> Result<PhysAddr, ()> {
let alloc_start = self.start_addr().align_up(align);
let alloc_end = alloc_start + size;
if alloc_end > self.end_addr() {
return Err(());
}
let excess_size = (self.end_addr() - alloc_end).as_usize();
if excess_size > 0 && excess_size < core::mem::size_of::<ListNode>() {
// The leftover space would be too small to hold a ListNode, so it could not be
// linked back into the free list; and because dealloc only receives the original
// layout, it could not be folded into the allocation either. Reject this region.
return Err(());
}
Ok(alloc_start)
}
}
struct FreeList {
head: ListNode,
}
impl FreeList {
const fn new() -> Self {
Self {
head: ListNode::new(0),
}
}
fn alloc_node<F, V>(&mut self, mut predicate: F) -> Option<(&'static mut ListNode, V)>
where F: FnMut(&mut ListNode) -> Result<V, ()> {
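// Walk the list keeping a handle on the previous node; unlink and return the
// first region the predicate accepts, together with the value it produced.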
let mut current = &mut self.head;
while let Some(ref mut region) = current.next {
if let Ok(v) = predicate(region) {
let next = region.next.take();
let ret = Some((current.next.take().unwrap(), v));
current.next = next;
return ret;
} else {
current = current.next.as_mut().unwrap();
}
}
None
}
fn align_layout(layout: Layout) -> (usize, usize) {
let layout = layout
.align_to(core::mem::align_of::<ListNode>())
.expect("adjusting alignment failed")
.pad_to_align();
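// every block handed out must be able to store a ListNode once it is freed,
// so never return a size smaller than size_of::<ListNode>()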
let size = layout.size().max(core::mem::size_of::<ListNode>());
(size, layout.align())
}
unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 {
let (size, align) = Self::align_layout(layout);
if let Some((region, alloc_start)) = self.alloc_node(|region| region.fit(size, align)) {
let alloc_end = alloc_start + size;
let excess_size = (region.end_addr() - alloc_end).as_usize();
if excess_size > 0 {
self.dealloc(alloc_end, excess_size);
}
alloc_start.as_mut_ptr()
} else {
core::ptr::null_mut()
}
}
unsafe fn dealloc(&mut self, start: PhysAddr, size: usize) {
assert_eq!(start.align_up(core::mem::align_of::<ListNode>()), start);
assert!(size >= core::mem::size_of::<ListNode>());
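// write a ListNode into the freed memory itself and push it onto the front of the list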
let mut node = ListNode::new(size);
node.next = self.head.next.take();
let node_ptr = start.as_mut_ptr::<ListNode>();
node_ptr.write(node);
self.head.next = Some(&mut *node_ptr);
}
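// Carve [start, start + size) out of the free list so the allocator never hands
// it out; the intended use (not shown in this diff) is reserving ranges that are
// already occupied.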
pub fn reserve(&mut self, start: PhysAddr, size: usize) {
if let Some((region, _)) = self
.alloc_node(|region| (region.start_addr() <= start).chain(|| region.fit(size, 1), ()))
{
/* layout
* region: | before | [start +: size] | after |
* ^ ^ ^ ^ region.end_addr()
* | | alloc_start |
* | | alloc_end
* | region.start_addr()
*/
let region_start = region.start_addr();
let region_end = region.end_addr();
let before_size = (start - region_start).as_usize();
if before_size > 0 {
unsafe { self.dealloc(region_start, before_size) }
}
let after_size = (region_end - (start + size)).as_usize();
if after_size > 0 {
unsafe { self.dealloc(start + size, after_size) }
}
}
}
}
pub struct FreeListAllocator {
list: Mutex<FreeList>,
}
impl FreeListAllocator {
pub fn new(start: PhysAddr, size: usize) -> Self {
let mut list = FreeList::new();
unsafe { list.dealloc(start, size) }
Self {
list: Mutex::new(list),
}
}
}
unsafe impl GlobalAlloc for FreeListAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
self.list.lock().alloc(layout)
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let (size, _) = FreeList::align_layout(layout);
self.list.lock().dealloc(PhysAddr::from(ptr), size)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::arch::layout::PAGE_SIZE;
#[test_case]
fn test_freelist() {
const BASE: PhysAddr = PhysAddr(0x80300000);
let allocator = FreeListAllocator::new(BASE, 32 * PAGE_SIZE);
for i in 0..32 {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
assert_eq!(ptr as usize, (BASE + i * PAGE_SIZE).as_usize());
}
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
assert_eq!(ptr, core::ptr::null_mut());
for i in (0..32).rev() {
let ptr = (BASE + i * PAGE_SIZE).as_mut_ptr();
unsafe {
allocator.dealloc(ptr, Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
};
}
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
assert_eq!(ptr as usize, BASE.as_usize());
}
#[test_case]
fn test_freelist_reserve() {
const BASE: PhysAddr = PhysAddr(0x80300000);
let allocator = FreeListAllocator::new(BASE, 32 * PAGE_SIZE);
allocator
.list
.lock()
.reserve(BASE + 4 * PAGE_SIZE, 4 * PAGE_SIZE);
let mut cnt = 32 - 4;
loop {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
if ptr.is_null() {
assert_eq!(cnt, 0);
break;
}
let ptr = PhysAddr::from(ptr);
assert!(
!(BASE + 4 * PAGE_SIZE <= ptr && ptr < BASE + (4 + 4) * PAGE_SIZE),
"Bad alloc: returned ptr: {:?}, reserved range: {:?}->{:?}",
ptr,
BASE + 4 * PAGE_SIZE,
BASE + (4 + 4) * PAGE_SIZE
);
cnt -= 1;
}
}
}

View File

@ -0,0 +1,5 @@
mod bitmap;
mod freelist;
pub use bitmap::*;
pub use freelist::*;

View File

@ -1,2 +1,3 @@
pub mod addr;
pub mod allocator;
pub mod paging;