chore: vspace: split vspace functions

This commit is contained in:
Paul Pan 2024-06-02 15:38:45 +08:00
parent 538053fc3e
commit d53fa7b93e
8 changed files with 165 additions and 125 deletions

View File

@ -1,10 +1,10 @@
use super::super::lowlevel::{ArchLL, ArchLLOps};
use super::TEST_DEVICE;
use crate::arch::layout::mmap_phys_to_virt;
use crate::arch::vspace::is_kernel_pagetable_installed;
use crate::arch::vspace::is_kernel_pagetable_allocated;
unsafe fn get_qemu_test_device() -> *mut u32 {
if is_kernel_pagetable_installed() {
if is_kernel_pagetable_allocated() {
mmap_phys_to_virt(TEST_DEVICE.into()).into()
} else {
TEST_DEVICE

View File

@ -1,3 +1,4 @@
use super::vspace::install_kernel_pagetable;
use crate::arch::init_early_console;
use crate::arch::layout::{mmap_phys_to_virt, zero_bss};
use crate::arch::vspace::{setup_kernel_paging, setup_memory};
@ -71,6 +72,8 @@ unsafe fn pre_main(hart_id: usize, fdt_addr: usize) {
setup_memory(fdt_addr);
setup_kernel_paging();
// TODO: on secondary cpu, we should copy existing kernel page table and TSS, then remap TSS
install_kernel_pagetable();
// after kernel paging, board level early console is broken (no address mapping)
mute_console();

View File

@ -0,0 +1,135 @@
use super::{alloc_page, install_pagetable, map_range, RAM_ALLOCATOR};
use crate::{arch::layout::*, vspace::*};
use log::{debug, info};
use spin::Mutex;
use utils::{addr::*, size::*};
#[cfg(debug_assertions)]
use super::ALLOC_COUNT;
// Per-hart root kernel page table (level-0 table). Built once by
// `setup_kernel_paging`, activated by `install_kernel_pagetable`; `None`
// until allocation happens. `#[thread_local]` because each hart is expected
// to own its table — TODO confirm secondary harts copy rather than share.
#[thread_local]
static KERNEL_PAGETABLE: Mutex<Option<Table<Level0>>> = Mutex::new(None);
// Expected number of physical pages consumed while building the kernel page
// table; cross-checked against ALLOC_COUNT in debug builds (see
// `setup_kernel_paging`). Must be updated if the set of mapped sections changes.
pub const KERNEL_PAGETABLE_SIZE: usize = 3;
/// Populate the global `RAM_ALLOCATOR` from the flattened device tree.
///
/// Order matters: all `/memory` regions are first added as free RAM, then
/// ranges that must never be handed out are reserved again — firmware
/// memory reservations, `/reserved-memory` children (e.g. OpenSBI), the
/// kernel image itself, and the FDT blob.
///
/// # Safety
/// `fdt_addr` must point to a valid, readable FDT; intended to be called
/// exactly once during early boot, before the kernel page table is installed.
pub unsafe fn setup_memory(fdt_addr: usize) {
info!("Setting up memory");
let fdt = unsafe { fdt::Fdt::from_ptr(fdt_addr as *const u8).unwrap() };
let mut mem = RAM_ALLOCATOR.lock();
// Add main memory regions to allocator
for region in fdt.memory().regions() {
debug!(
"Adding free memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
// NOTE(review): `size` is assumed present for /memory regions — the fdt
// crate exposes it as Option; confirm no platform omits it.
mem.dealloc(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
}
// Exclude memory occupied by UEFI
for region in fdt.memory_reservations() {
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.address(),
region.size()
);
mem.reserve(PhysAddr::from(region.address()), align_up(region.size(), PAGE_SIZE));
}
// Exclude memory occupied by OpenSBI
let reserved_memory = fdt
.find_node("/reserved-memory")
.map(|node| node.children())
.expect("No reserved memory found in device tree");
for child in reserved_memory {
// NOTE(review): only the FIRST `reg` entry of each child is reserved —
// confirm reserved-memory nodes on target platforms are single-region.
let region = child.reg().unwrap().next().unwrap();
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
mem.reserve(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
}
// Exclude kernel memory
// KERNEL_START/KERNEL_END come from the linker script layout; the end is
// rounded up so a partially used trailing page is not reused.
let kernel_start = kernel_virt_to_phys(KERNEL_START.as_virt_addr());
let kernel_end = kernel_virt_to_phys(KERNEL_END.as_virt_addr()).align_up(PAGE_SIZE);
debug!("Reserving kernel memory:\tstart: {:x?}, end: {:x?}", kernel_start, kernel_end);
mem.reserve(kernel_start, (kernel_end - kernel_start).as_usize());
// Exclude FDT table
// The blob itself lives in RAM we just freed; keep it intact since it is
// still parsed later during boot.
let fdt_addr = PhysAddr::from(fdt_addr);
let fdt_size = align_up(fdt.total_size(), PAGE_SIZE);
debug!("Reserving FDT memory:\tstart: {:x?}, size: {:x?}", fdt_addr, fdt_size);
mem.reserve(fdt_addr, fdt_size);
}
/// Build (but do not install) the kernel page table for this hart.
///
/// Maps the kernel's linker sections with least-privilege attributes
/// (TEXT R+X, RODATA R, DATA/BSS/TSS R+W) and identity-offset maps the first
/// 4 GiB of physical memory into the mmap window. The finished table is
/// stashed in `KERNEL_PAGETABLE`; activation happens separately in
/// `install_kernel_pagetable`.
///
/// # Panics
/// Panics if a kernel page table was already allocated, or (debug builds) if
/// the number of pages allocated differs from `KERNEL_PAGETABLE_SIZE`.
///
/// # Safety
/// Must run during early boot, after `setup_memory` has seeded the allocator.
pub unsafe fn setup_kernel_paging() {
info!("Setting up kernel paging");
assert!(!is_kernel_pagetable_allocated(), "Kernel pagetable already allocated");
let root_pt = alloc_page();
let mut kernel_pt = Table::<Level0>::new(root_pt.as_usize().into());
// Maps each linker section [<SECTION>_START, <SECTION>_END) with `attr`.
// Relies on the nightly `concat_idents!` macro to form the symbol names.
macro_rules! map_section {
(($($section:ident),+),$attr:expr) => {
$(
let start = concat_idents!($section, _START).as_virt_addr().align_down(PAGE_SIZE);
let end = concat_idents!($section, _END).as_virt_addr().align_up(PAGE_SIZE);
debug!("Mapping section {}:\t[{:?}, {:?}]", stringify!($section), start, end);
map_range(&mut kernel_pt, start, kernel_virt_to_phys(start), (end - start).as_usize(), $attr);
)+
};
}
map_section!((TEXT), MapAttr::READABLE | MapAttr::EXECUTABLE);
map_section!((RODATA), MapAttr::READABLE);
map_section!((DATA, BSS), MapAttr::READABLE | MapAttr::WRITABLE);
// TODO: every core must have a separate TSS section
map_section!((TSS), MapAttr::READABLE | MapAttr::WRITABLE);
// map 4 GiB physical memory
// TODO: walk fdt to get all memory region? put it in driver init
map_range(
&mut kernel_pt,
mmap_phys_to_virt(PhysAddr(0)),
PhysAddr(0),
4 * GIB, // NOTE: will fail on 32-bit system
MapAttr::READABLE | MapAttr::WRITABLE,
);
// Debug-only invariant: building the table must consume exactly
// KERNEL_PAGETABLE_SIZE pages; a mismatch means the section layout or the
// mapping granularity changed without updating the constant.
#[cfg(debug_assertions)]
{
let count = ALLOC_COUNT.load(core::sync::atomic::Ordering::Acquire);
log::trace!("Kernel page table size: {:?}", count);
assert!(KERNEL_PAGETABLE_SIZE == count, "Kernel page table size mismatch");
}
*KERNEL_PAGETABLE.lock() = Some(kernel_pt);
}
/// Activate the kernel page table previously built by `setup_kernel_paging`.
///
/// # Panics
/// Panics if no kernel page table has been allocated yet.
///
/// # Safety
/// Switching page tables changes the active address space; the caller must
/// ensure everything touched afterwards is mapped in the new table.
pub unsafe fn install_kernel_pagetable() {
    info!("Setting up new kernel pagetable");
    let guard = KERNEL_PAGETABLE.lock();
    match guard.as_ref() {
        Some(table) => install_pagetable(table),
        None => panic!("No kernel pagetable found"),
    }
}
/// Whether this hart's kernel page table has been allocated
/// (i.e. `setup_kernel_paging` has completed).
#[inline]
pub fn is_kernel_pagetable_allocated() -> bool {
    matches!(&*KERNEL_PAGETABLE.lock(), Some(_))
}
/// Map `[from, from + size)` to `[to, to + size)` in the kernel page table
/// with attributes `attr`. Panics if the table has not been allocated yet.
unsafe fn map_kernel(from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
    let mut pagetable = KERNEL_PAGETABLE.lock();
    map_range(pagetable.as_mut().unwrap(), from, to, size, attr);
}

View File

@ -1,7 +1,9 @@
mod addr;
mod entry;
mod kmem;
mod level;
mod table;
mod utils;
pub use kmem::*;
pub use utils::*;

View File

@ -1,75 +1,23 @@
use core::panic;
use crate::{arch::layout::*, vspace::*};
use allocator::RamBlock;
use log::{debug, info};
use core::panic;
use log::debug;
use spin::Mutex;
use utils::{addr::*, size::GIB};
use utils::addr::*;
#[thread_local]
static KERNEL_PAGETABLE: Mutex<Option<Table<Level0>>> = Mutex::new(None);
pub static RAM_ALLOCATOR: Mutex<RamBlock<8>> = Mutex::new(RamBlock::new());
pub static KERNEL_ALLOCATOR: Mutex<RamBlock<8>> = Mutex::new(RamBlock::new());
#[cfg(debug_assertions)]
pub static ALLOC_COUNT: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0);
pub unsafe fn setup_memory(fdt_addr: usize) {
info!("Setting up memory");
let fdt = unsafe { fdt::Fdt::from_ptr(fdt_addr as *const u8).unwrap() };
let mut mem = KERNEL_ALLOCATOR.lock();
// Add main memory regions to allocator
for region in fdt.memory().regions() {
debug!(
"Adding free memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
mem.dealloc(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
#[cfg(debug_assertions)]
fn alloc_callback() {
let cnt = ALLOC_COUNT.fetch_add(1, core::sync::atomic::Ordering::SeqCst);
}
// Exclude memory occupied by UEFI
for region in fdt.memory_reservations() {
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.address(),
region.size()
);
mem.reserve(PhysAddr::from(region.address()), align_up(region.size(), PAGE_SIZE));
}
// Exclude memory occupied by OpenSBI
let reserved_memory = fdt
.find_node("/reserved-memory")
.map(|node| node.children())
.expect("No reserved memory found in device tree");
for child in reserved_memory {
let region = child.reg().unwrap().next().unwrap();
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
mem.reserve(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
}
// Exclude kernel memory
let kernel_start = kernel_virt_to_phys(KERNEL_START.as_virt_addr());
let kernel_end = kernel_virt_to_phys(KERNEL_END.as_virt_addr()).align_up(PAGE_SIZE);
debug!("Reserving kernel memory:\tstart: {:x?}, end: {:x?}", kernel_start, kernel_end);
mem.reserve(kernel_start, (kernel_end - kernel_start).as_usize());
// Exclude FDT table
let fdt_addr = PhysAddr::from(fdt_addr);
let fdt_size = align_up(fdt.total_size(), PAGE_SIZE);
debug!("Reserving FDT memory:\tstart: {:x?}, size: {:x?}", fdt_addr, fdt_size);
mem.reserve(fdt_addr, fdt_size);
}
fn alloc_page() -> PhysAddr {
let addr = KERNEL_ALLOCATOR.lock().alloc(PAGE_LAYOUT).expect("Failed to allocate page");
#[cfg_attr(debug_assertions, tracer::trace_callback(log = true, callback = alloc_callback))]
pub fn alloc_page() -> PhysAddr {
let addr = RAM_ALLOCATOR.lock().alloc(PAGE_LAYOUT).expect("Failed to allocate page");
unsafe {
// zero page
@ -80,7 +28,7 @@ fn alloc_page() -> PhysAddr {
addr
}
fn map_range(pt: &mut Table<Level0>, from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
pub fn map_range(pt: &mut Table<Level0>, from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
let mut virt_start = from;
let mut phys_start = to;
let virt_end = from + size;
@ -132,18 +80,7 @@ fn map_range(pt: &mut Table<Level0>, from: VirtAddr, to: PhysAddr, size: usize,
}
}
pub unsafe fn map(from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
let mut guard = KERNEL_PAGETABLE.lock();
let pt = guard.as_mut().unwrap();
map_range(pt, from, to, size, attr);
}
#[inline]
pub fn is_kernel_pagetable_installed() -> bool {
KERNEL_PAGETABLE.lock().is_some()
}
pub fn page_table_mode() -> riscv::register::satp::Mode {
fn page_table_mode() -> riscv::register::satp::Mode {
#[cfg(feature = "riscv.pagetable.sv39")]
return riscv::register::satp::Mode::Sv39;
#[cfg(feature = "riscv.pagetable.sv48")]
@ -152,46 +89,9 @@ pub fn page_table_mode() -> riscv::register::satp::Mode {
return riscv::register::satp::Mode::Sv57;
}
pub unsafe fn setup_kernel_paging() {
info!("Setting up kernel paging");
assert!(!is_kernel_pagetable_installed(), "Kernel pagetable already installed");
pub unsafe fn install_pagetable(pt: &Table<Level0>) {
let root_pt = PhysAddr::from(pt.entries as *const _ as *const usize);
let root_pt = alloc_page();
let mut kernel_pt = Table::<Level0>::new(root_pt.as_usize().into());
macro_rules! map_section {
(($($section:ident),+),$attr:expr) => {
$(
let start = concat_idents!($section, _START).as_virt_addr().align_down(PAGE_SIZE);
let end = concat_idents!($section, _END).as_virt_addr().align_up(PAGE_SIZE);
debug!("Mapping section {}:\t[{:?}, {:?}]", stringify!($section), start, end);
map_range(&mut kernel_pt, start, kernel_virt_to_phys(start), (end - start).as_usize(), $attr);
)+
};
}
map_section!((TEXT), MapAttr::READABLE | MapAttr::EXECUTABLE);
map_section!((RODATA), MapAttr::READABLE);
map_section!((DATA, BSS), MapAttr::READABLE | MapAttr::WRITABLE);
// TODO: every core must have a separate TSS section
map_section!((TSS), MapAttr::READABLE | MapAttr::WRITABLE);
// map 4 GiB physical memory
// TODO: walk fdt to get all memory region? put it in driver init
map_range(
&mut kernel_pt,
mmap_phys_to_virt(PhysAddr(0)),
PhysAddr(0),
4 * GIB, // NOTE: will fail on 32-bit system
MapAttr::READABLE | MapAttr::WRITABLE,
);
// setup new pagetable
debug!("Setting up new kernel pagetable");
riscv::register::satp::set(page_table_mode(), 0, root_pt.extract_ppn());
riscv::asm::sfence_vma_all();
// switch to virtual address
*KERNEL_PAGETABLE.lock() = Some(kernel_pt);
}

View File

@ -137,7 +137,7 @@ impl<'a> CNodeCap<'a> {
#[cfg(test)]
mod tests {
use super::*;
use crate::arch::vspace::KERNEL_ALLOCATOR;
use crate::arch::vspace::RAM_ALLOCATOR;
use crate::objects::cap::CapEntry;
use crate::objects::null::NullCap;
use core::alloc::Layout;
@ -148,7 +148,7 @@ mod tests {
let create_cnode = |radix: usize, guard_size: usize, guard: usize| {
let size = ObjectType::CNode.size(radix);
let layout = Layout::from_size_align(size, 1).unwrap();
let ptr = KERNEL_ALLOCATOR.lock().alloc(layout).unwrap();
let ptr = RAM_ALLOCATOR.lock().alloc(layout).unwrap();
let raw = CNodeCap::mint(radix, guard_size, guard, ptr);
CapEntry::new(raw)
};

View File

@ -134,14 +134,14 @@ impl UntypedCap<'_> {
#[cfg(test)]
mod tests {
use super::*;
use crate::{arch::vspace::KERNEL_ALLOCATOR, objects::cap::CapEntry};
use crate::{arch::vspace::RAM_ALLOCATOR, objects::cap::CapEntry};
use core::alloc::Layout;
use log::info;
fn alloc_mem(size: usize) -> PhysAddr {
assert!(size.is_power_of_two());
let layout = Layout::from_size_align(size, 1).unwrap();
KERNEL_ALLOCATOR.lock().alloc(layout).unwrap()
RAM_ALLOCATOR.lock().alloc(layout).unwrap()
}
fn create_untyped_cte(size: usize, is_device: bool) -> CapEntry {

View File

@ -42,16 +42,16 @@ pub fn trace_callback(attr: TokenStream, item: TokenStream) -> TokenStream {
let call_callback = callback.iter().map(|ident| {
quote! {
if #debug_print { log::debug!("[tracer][callback] invoking {}", stringify!(#ident)) }
if #debug_print { log::trace!("[tracer][callback] invoking {}", stringify!(#ident)) }
#ident();
}
});
let expanded = quote! {
#fn_vis #fn_sig {
if #debug_print { log::debug!("[tracer] tracing {}", stringify!(#fn_name)) }
if #debug_print { log::trace!("[tracer] tracing {}", stringify!(#fn_name)) }
{ #(#call_callback)* }
if #debug_print { log::debug!("[tracer][function] invoking {}", stringify!(#fn_name)) }
if #debug_print { log::trace!("[tracer][function] invoking {}", stringify!(#fn_name)) }
(|| #fn_block)()
}
};