Compare commits

...

2 Commits

Author SHA1 Message Date
a4861445fe feat: refactor vspace 2024-05-19 01:10:01 +08:00
a56c481970 feat: split lib/vspace:
1. `addr` move to `utils`
2. prepare to refactor `vspace`

[skip_ci]
2024-05-07 23:50:52 +08:00
42 changed files with 491 additions and 401 deletions

19
Cargo.lock generated
View File

@ -10,12 +10,11 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "allocator"
version = "0.1.0"
version = "0.1.1"
dependencies = [
"spin",
"static_assertions",
"utils",
"vspace",
]
[[package]]
@ -110,7 +109,6 @@ dependencies = [
"uapi",
"uart_16550",
"utils",
"vspace",
]
[[package]]
@ -254,7 +252,6 @@ version = "0.1.0"
dependencies = [
"num-derive",
"num-traits",
"vspace",
]
[[package]]
@ -276,19 +273,7 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "utils"
version = "0.1.0"
dependencies = [
"vspace",
]
[[package]]
name = "vspace"
version = "0.1.0"
dependencies = [
"bitflags 2.5.0",
"num-derive",
"num-traits",
]
version = "0.1.1"
[[package]]
name = "x86"

View File

@ -23,7 +23,6 @@ riscv = []
uapi = { path = "../uapi" }
allocator = { path = "../lib/allocator" }
utils = { path = "../lib/utils", default-features = false }
vspace = { path = "../lib/vspace", default-features = false }
bitflags = "2.5"
cfg-if = "1.0"

View File

@ -1,6 +1,6 @@
use super::layout::{BSS_END, BSS_START, TEXT_END, TEXT_START};
use crate::plat::backtrace::FrameWalker;
use vspace::addr::{AddressOps, VirtAddr};
use utils::addr::{AddressOps, VirtAddr};
impl FrameWalker {
fn read_fp() -> usize {

View File

@ -20,11 +20,18 @@ global_asm!(
# 0x00000000_00000000 -> 0x00000000 [ 0x00000000_00000000 -> 0x00000000_40000000 ]
# 0x00000000_40000000 -> 0x40000000 [ 0x00000000_40000000 -> 0x00000000_80000000 ]
# 0x00000000_80000000 -> 0x80000000 [ 0x00000000_80000000 -> 0x00000001_00000000 ]
# 0xFFFFFFC0_00000000 -> 0x00000000 [ 0xFFFFFFC0_00000000 -> 0xFFFFFFC0_40000000 ]
# 0xFFFFFFC0_40000000 -> 0x00000000 [ 0xFFFFFFC0_40000000 -> 0xFFFFFFC0_80000000 ]
# 0xFFFFFFC0_80000000 -> 0x00000000 [ 0xFFFFFFC0_80000000 -> 0xFFFFFFC1_00000000 ]
# 0xFFFFFFD0_00000000 -> 0x80000000 [ 0xFFFFFFD0_00000000 -> 0xFFFFFFD0_40000000 ]
.quad (0x00000 << 10) | 0xf
.quad (0x40000 << 10) | 0xf
.quad (0x80000 << 10) | 0xf
.zero 8 * 317
.zero 8 * 253
.quad (0x00000 << 10) | 0xf
.quad (0x40000 << 10) | 0xf
.quad (0x80000 << 10) | 0xf
.zero 8 * 61
.quad (0x80000 << 10) | 0xf
.zero 8 * 191
"

View File

@ -3,8 +3,8 @@ use crate::arch::layout::{mmap_phys_to_virt, zero_bss};
use crate::arch::vspace::{setup_kernel_paging, setup_memory};
use crate::entry::{rust_main, HART_ID};
use crate::plat::console::mute_console;
use utils::addr::AddressOps;
use utils::atomic::AtomicConstPtr;
use vspace::addr::AddressOps;
#[naked]
#[no_mangle]

View File

@ -1,7 +1,7 @@
use core::alloc::Layout;
use utils::addr::{AddressOps, PhysAddr, VirtAddr};
use utils::extern_addr::ExternSymbol;
use utils::size::KIB;
use vspace::addr::{AddressOps, PhysAddr, VirtAddr};
extern "C" {
pub static KERNEL_START: ExternSymbol;

View File

@ -0,0 +1,66 @@
use utils::{
addr::{AddressOps, VirtAddr},
MASK,
};
use crate::vspace::TableLevel;
#[cfg(feature = "riscv.pagetable.sv39")]
mod sv39;
/// Sign-extends `addr`, treating bit index `bits` (zero-based) as the sign bit:
/// every bit above `bits` is filled with a copy of that bit.
fn sign_extend(addr: usize, bits: usize) -> usize {
    let upper = !MASK!(bits + 1); // all bits strictly above the sign bit
    match (addr >> bits) & 1 {
        1 => addr | upper,
        _ => addr,
    }
}
/// Arch-generic PPN <-> PTE bit plumbing shared by physical-address types.
trait GenericPhysAddrPage: AddressOps {
    /// Bit width of the in-page offset (log2 of the page size).
    const PG_OFFSET: usize;
    /// Number of bits in the Physical Page Number field.
    const PPN_BITS: usize;
    /// Bit position of the PPN field inside a PTE.
    const PPN_OFFSET: usize;
    /// PPN field mask as it appears inside a physical address.
    const PA_PPN_MASK: usize = MASK!(Self::PPN_BITS) << Self::PG_OFFSET;
    /// PPN field mask as it appears inside a PTE.
    const PTE_PPN_MASK: usize = MASK!(Self::PPN_BITS) << Self::PPN_OFFSET;
    /// Physical Page Number of this address.
    fn extract_ppn(&self) -> usize {
        (self.as_usize() >> Self::PG_OFFSET) & MASK!(Self::PPN_BITS)
    }
    /// Same PPN, already shifted into its PTE position.
    fn extract_ppn_shifted(&self) -> usize {
        self.extract_ppn() << Self::PPN_OFFSET
    }
    /// Recovers the address encoded in `pte` (as a raw usize).
    fn from_pte(pte: usize) -> usize {
        let ppn = (pte & Self::PTE_PPN_MASK) >> Self::PPN_OFFSET;
        // NOTE(review): sign-extending a *physical* address is unusual —
        // presumably intentional for high-half mappings; confirm with callers.
        sign_extend(ppn << Self::PG_OFFSET, Self::PPN_BITS + Self::PG_OFFSET)
    }
    /// Returns `pte` with its PPN field replaced by this address's PPN.
    fn update_pte(&self, pte: usize) -> usize {
        (pte & !Self::PTE_PPN_MASK) | self.extract_ppn_shifted()
    }
}
/// Arch-generic VPN <-> virtual-address bit plumbing.
trait GenericVirtAddrPage: AddressOps {
    /// Bit width of the in-page offset (log2 of the page size).
    const PG_OFFSET: usize;
    /// Index of the last (smallest-page) level, e.g. 2 for sv39 (Level0..=Level2).
    const MAX_LEVEL: usize;
    /// Extracts the VPN slice belonging to page-table level `T`.
    fn extract_vpn<T: TableLevel>(&self) -> usize {
        let mask = MASK!(T::LEVEL_BITS);
        (self.as_usize() >> (Self::PG_OFFSET + T::LEVEL_BITS * (Self::MAX_LEVEL - T::LEVEL))) & mask
    }
    /// Replaces level `T`'s VPN slice with `vpn` and canonicalises (sign-extends)
    /// the resulting address.
    fn merge_vpn<T: TableLevel>(&self, vpn: usize) -> VirtAddr {
        let shift = Self::PG_OFFSET + T::LEVEL_BITS * (Self::MAX_LEVEL - T::LEVEL);
        let mask = MASK!(T::LEVEL_BITS);
        let addr = (self.as_usize() & !(mask << shift)) | ((vpn & mask) << shift);
        // Derive the VA width instead of hard-coding sv39's 39 bits:
        // offset bits plus LEVEL_BITS for each of the (MAX_LEVEL + 1) levels.
        // For sv39 this is 12 + 9 * 3 = 39, so behavior is unchanged there.
        let va_bits = Self::PG_OFFSET + T::LEVEL_BITS * (Self::MAX_LEVEL + 1);
        VirtAddr(sign_extend(addr, va_bits - 1))
    }
}

View File

@ -0,0 +1,42 @@
use super::{GenericPhysAddrPage, GenericVirtAddrPage};
use crate::vspace::{PhysAddrPage, TableLevel, VirtAddrPage};
use utils::addr::*;
impl GenericPhysAddrPage for PhysAddr {
    // Sv39: 12-bit page offset, 44-bit PPN stored at bit 10 of the PTE.
    const PG_OFFSET: usize = 12;
    const PPN_BITS: usize = 44;
    const PPN_OFFSET: usize = 10;
}
/// Public facade: forwards every `PhysAddrPage` method to the generic impl.
impl PhysAddrPage for PhysAddr {
    fn extract_ppn(&self) -> usize {
        <Self as GenericPhysAddrPage>::extract_ppn(self)
    }
    fn extract_ppn_shifted(&self) -> usize {
        <Self as GenericPhysAddrPage>::extract_ppn_shifted(self)
    }
    fn from_pte(pte: usize) -> PhysAddr {
        PhysAddr::from(<Self as GenericPhysAddrPage>::from_pte(pte))
    }
    fn update_pte(&self, pte: usize) -> usize {
        <Self as GenericPhysAddrPage>::update_pte(self, pte)
    }
}
impl GenericVirtAddrPage for VirtAddr {
    // Sv39: 12-bit page offset, three translation levels (Level0..=Level2).
    const PG_OFFSET: usize = 12;
    const MAX_LEVEL: usize = 2;
}
/// Public facade: forwards every `VirtAddrPage` method to the generic impl.
impl VirtAddrPage for VirtAddr {
    fn extract_vpn<T: TableLevel>(&self) -> usize {
        <Self as GenericVirtAddrPage>::extract_vpn::<T>(self)
    }
    fn merge_vpn<T: TableLevel>(&self, vpn: usize) -> usize {
        <Self as GenericVirtAddrPage>::merge_vpn::<T>(self, vpn).into()
    }
}

View File

@ -1,7 +1,9 @@
use super::traits::PhysAddrPaging;
use crate::{
arch::layout::mmap_phys_to_virt,
vspace::{EntryOps, MapAttr, PhysAddrPage},
};
use bitflags::bitflags;
use vspace::addr::PhysAddr;
use vspace::paging::{EntryOps, MapAttr};
use utils::addr::{PhysAddr, VirtAddr};
bitflags! {
#[derive(Debug)]
@ -81,29 +83,33 @@ pub struct Entry(usize);
impl EntryOps for Entry {
fn new_page(phys_addr: PhysAddr, attr: MapAttr) -> Self {
let flags = PTEFlags::from(attr);
let ppn = phys_addr.to_ppn_shifted();
let ppn = phys_addr.extract_ppn_shifted();
Self(ppn | flags.bits())
}
fn new_table(phys_addr: PhysAddr) -> Self {
let flags = PTEFlags::VALID;
let ppn = phys_addr.to_ppn_shifted();
let ppn = phys_addr.extract_ppn_shifted();
Self(ppn | flags.bits())
}
fn addr(&self) -> PhysAddr {
fn paddr(&self) -> PhysAddr {
PhysAddr::from_pte(self.0)
}
fn vaddr(&self) -> VirtAddr {
unsafe { mmap_phys_to_virt(self.paddr()) }
}
fn attr(&self) -> MapAttr {
let flags = PTEFlags::from_bits_truncate(self.0);
flags.into()
}
fn set_addr(&mut self, addr: PhysAddr) {
self.0 = addr.merge_pte(self.0);
fn set_paddr(&mut self, addr: PhysAddr) {
self.0 = addr.update_pte(self.0);
}
fn set_attr(&mut self, attr: MapAttr) {
@ -127,7 +133,7 @@ impl EntryOps for Entry {
impl core::fmt::Debug for Entry {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct("PageTableEntry")
.field("addr", &self.addr())
.field("addr", &self.paddr())
.field("flag", &PTEFlags::from_bits_truncate(self.0))
.finish()
}

View File

@ -0,0 +1,25 @@
use super::entry::Entry;
use crate::vspace::{LevelInvalid, TableLevel};
// Implements `TableLevel` for a type as an inert placeholder: every numeric
// parameter is zeroed and `DUMMY` is set, so the level can terminate the
// NEXT/PREVIOUS chain without ever being used as a real page-table level.
macro_rules! table_level_impl_none {
    ($level:ident) => {
        impl TableLevel for $level {
            type Entry = Entry;
            type NEXT = LevelInvalid;
            type PREVIOUS = LevelInvalid;
            const LEVEL: usize = 0;
            const LEVEL_SIZE: usize = 0;
            const LEVEL_BITS: usize = 0;
            const ENTRIES: usize = 0;
            // Marks this impl as a sentinel; real levels keep DUMMY = false.
            const DUMMY: bool = true;
        }
    };
}
// Re-export so sibling arch modules (e.g. sv39) can stamp out their own sentinels.
use table_level_impl_none;
// `LevelInvalid` itself is the canonical sentinel level.
table_level_impl_none!(LevelInvalid);
#[cfg(feature = "riscv.pagetable.sv39")]
mod sv39;

View File

@ -0,0 +1,40 @@
use super::table_level_impl_none;
use super::Entry;
use crate::vspace::*;
use utils::size::*;
// Sv39 level parameters: three 9-bit translation levels, 512 entries each.
impl TableLevel for Level0 {
    type Entry = Entry;
    type NEXT = Level1;
    type PREVIOUS = LevelInvalid;
    const LEVEL: usize = 0;
    // Root level: one entry spans 1 GiB ("giga page").
    const LEVEL_SIZE: usize = 1 * GIB;
    const LEVEL_BITS: usize = 9;
    const ENTRIES: usize = 512;
}
impl TableLevel for Level1 {
    type Entry = Entry;
    type NEXT = Level2;
    type PREVIOUS = Level0;
    const LEVEL: usize = 1;
    // Middle level: one entry spans 2 MiB ("mega page").
    const LEVEL_SIZE: usize = 2 * MIB;
    const LEVEL_BITS: usize = 9;
    const ENTRIES: usize = 512;
}
impl TableLevel for Level2 {
    type Entry = Entry;
    type NEXT = LevelInvalid;
    type PREVIOUS = Level1;
    const LEVEL: usize = 2;
    // Leaf level: one entry spans a 4 KiB base page.
    const LEVEL_SIZE: usize = 4 * KIB;
    const LEVEL_BITS: usize = 9;
    const ENTRIES: usize = 512;
}
// Sv39 has no levels 3/4; stamp them out as inert sentinels so the
// TableLevel chain stays well-formed.
table_level_impl_none!(Level3);
table_level_impl_none!(Level4);

View File

@ -1,8 +1,7 @@
mod addr;
mod entry;
mod level;
mod table;
mod traits;
mod utils;
pub use table::Table;
pub use traits::*;
pub use utils::*;

View File

@ -1,147 +1,118 @@
use super::entry::Entry;
use super::traits::{TableLevelSize, VirtAddrPaging};
use crate::arch::layout::{kernel_phys_to_virt, PAGE_SIZE};
use num_traits::ToPrimitive;
use vspace::addr::*;
use vspace::paging::*;
use crate::{arch::layout::PAGE_SIZE, vspace::*};
use utils::addr::*;
#[repr(C, align(4096))]
pub struct Table {
entries: [Entry; PAGE_SIZE / core::mem::size_of::<Entry>()],
}
assert_eq_size!(Table, [u8; PAGE_SIZE]);
const_assert_eq!(core::mem::size_of::<Table>(), Table::TABLE_SIZE);
impl Table {
fn lookup_mut_internal(&mut self, vaddr: VirtAddr) -> (&mut Entry, TableLevel) {
// NOTE: we assume that this page table is the root page table
let mut cur = Self::MAX_PAGE_SIZE;
let mut table = self;
loop {
let vpn = vaddr.to_vpn(cur);
let entry = table.entries[vpn];
if !entry.is_valid() || entry.is_leaf() || cur.next().is_none() {
return (&mut table.entries[vpn], cur);
}
cur = cur.next().unwrap();
table = unsafe { Self::new(kernel_phys_to_virt(entry.addr()).as_usize().into()) };
}
}
pub fn mode() -> riscv::register::satp::Mode {
#[cfg(feature = "riscv.pagetable.sv39")]
return riscv::register::satp::Mode::Sv39;
#[cfg(feature = "riscv.pagetable.sv48")]
return riscv::register::satp::Mode::Sv48;
#[cfg(feature = "riscv.pagetable.sv57")]
return riscv::register::satp::Mode::Sv57;
}
}
impl TableOps for Table {
type Entry = Entry;
#[cfg(feature = "riscv.pagetable.sv39")]
const MAX_PAGE_SIZE: TableLevel = TableLevel::Level2;
const TABLE_SIZE: usize = PAGE_SIZE;
unsafe fn new(location: VirtAddr) -> &'static mut Self {
impl<'a, T: TableLevel> TableOps<'a, T> for Table<'a, T> {
unsafe fn new(location: VirtAddr) -> Self {
assert!(!T::DUMMY);
assert!(location.is_aligned(PAGE_SIZE));
let ptr: *mut Self = location.into();
&mut *ptr
assert!(location.is_aligned((1 << T::LEVEL_BITS) as usize));
Table {
entries: unsafe { core::slice::from_raw_parts_mut(location.as_mut_ptr::<T::Entry>(), T::ENTRIES) },
}
}
fn map(&mut self, from: VirtAddr, to: PhysAddr, attr: MapAttr, level: TableLevel) -> PageResult {
assert!(from.is_aligned(PAGE_SIZE));
assert!(to.is_aligned(PAGE_SIZE));
assert!(level.is_aligned(from));
if !attr.contains(MapAttr::PAGE_TABLE) {
assert!(level.is_aligned(to));
fn map(&mut self, from: VirtAddr, to: PhysAddr, attr: MapAttr) -> PageResult {
if !from.is_aligned(T::LEVEL_SIZE) || !to.is_aligned(PAGE_SIZE) {
return Err(PageError::NotAligned);
}
let (entry, cur) = self.lookup_mut_internal(from);
let vpn = from.extract_vpn::<T>();
let entry = &mut self.entries[vpn];
if cur > level {
// previous level is not mapped
return Err(PageError::MissingEntry(cur));
if entry.is_valid() {
return Err(PageError::AlreadyMapped);
}
if entry.is_valid() || cur < level {
// requested level is already mapped
return Err(PageError::AlreadyMapped(cur));
}
entry.set_addr(to);
entry.set_paddr(to);
entry.set_attr(attr);
Ok(())
}
fn unmap(&mut self, vaddr: VirtAddr) -> PageResult {
let (entry, level) = self.lookup_mut_internal(vaddr);
let vpn = vaddr.extract_vpn::<T>();
let entry = &mut self.entries[vpn];
// do some sanity checks
if !entry.is_valid() {
return Err(PageError::MissingEntry(level));
return Err(PageError::MissingEntry);
}
entry.set_addr(PhysAddr::default());
#[cfg(debug_assertions)]
if entry.is_leaf() {
use log::warn;
if T::NEXT::DUMMY {
panic!("unmap failed: sanity check: leaf page table at the last level");
}
let table = unsafe { Table::<T::NEXT>::new(entry.vaddr()) };
for i in 0..T::ENTRIES {
if table.entries[i].is_valid() {
// don't crash the kernel, might be intentional
warn!("unmap: sanity check: leaf page table is not empty");
}
}
}
// unmap the entry
entry.set_paddr(PhysAddr::default());
entry.set_attr(MapAttr::empty());
Ok(())
}
fn lookup(&mut self, vaddr: VirtAddr) -> Option<&Self::Entry> {
let (entry, _) = self.lookup_mut_internal(vaddr);
fn lookup(&mut self, vaddr: VirtAddr) -> Option<&T::Entry> {
let vpn = vaddr.extract_vpn::<T>();
let entry = &self.entries[vpn];
entry.is_valid().then_some(entry)
}
fn lookup_mut(&mut self, vaddr: VirtAddr) -> Option<&mut Self::Entry> {
let (entry, _) = self.lookup_mut_internal(vaddr);
fn lookup_mut(&mut self, vaddr: VirtAddr) -> Option<&mut T::Entry> {
let vpn = vaddr.extract_vpn::<T>();
let entry = &mut self.entries[vpn];
entry.is_valid().then_some(entry)
}
fn translate(&mut self, vaddr: VirtAddr) -> Option<PhysAddr> {
let (entry, level) = self.lookup_mut_internal(vaddr);
entry
.is_valid()
.then_some(entry.addr())
.map(|p| p.as_usize() | vaddr.lower_bits(level.to_usize().unwrap()))
.map(|p| p.into())
fn retype<V: TableLevel>(&mut self) -> Table<'a, V> {
assert!(V::LEVEL_BITS <= T::LEVEL_BITS);
let ptr = self.entries.as_mut_ptr() as usize;
let ptr = ptr as *mut V::Entry;
unsafe { Table::new(VirtAddr::from(ptr)) }
}
}
impl Table {
fn debug_walk(&self, f: &mut core::fmt::Formatter, base: VirtAddr, level: TableLevel) -> core::fmt::Result {
impl<T: TableLevel> Table<'_, T> {
fn debug_walk(&self, f: &mut core::fmt::Formatter, base: VirtAddr) -> core::fmt::Result {
macro_rules! print_one {
($($arg:tt)*) => {
for _ in level.to_usize().unwrap()..Self::MAX_PAGE_SIZE.to_usize().unwrap() {
for _ in 0..T::LEVEL {
write!(f, "\t")?;
}
writeln!(f, $($arg)*)?;
};
}
for (i, entry) in self.entries.into_iter().enumerate() {
for (i, entry) in self.entries.iter().enumerate() {
if !entry.is_valid() {
continue;
}
print_one!(
"[{:>3}]: {:?} -> {:?} : {:?}",
"[{:>3}]: {:#x?} -> {:?} : {:?}",
i,
base.merge_vpn(i, level),
entry.addr(),
base.merge_vpn::<T>(i),
entry.paddr(),
entry
);
if !entry.is_leaf() && level.next().is_some() {
let table = unsafe { Self::new(entry.addr().as_usize().into()) };
table.debug_walk(f, base.merge_vpn(i, level), level.next().unwrap())?;
if !entry.is_leaf() && !T::NEXT::DUMMY {
let table = unsafe { Table::<T::NEXT>::new(entry.vaddr()) };
table.debug_walk(f, base.merge_vpn::<T>(i).into())?;
}
}
@ -149,9 +120,9 @@ impl Table {
}
}
impl core::fmt::Debug for Table {
impl<T: TableLevel> core::fmt::Debug for Table<'_, T> {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
writeln!(f, "PageTable({:p}):", self)?;
self.debug_walk(f, VirtAddr(0), Self::MAX_PAGE_SIZE)
self.debug_walk(f, VirtAddr(0))
}
}

View File

@ -1,95 +0,0 @@
use num_traits::ToPrimitive;
use utils::size::{GIB, KIB, MIB, TIB};
use utils::MASK;
use vspace::addr::{AddressOps, PhysAddr, VirtAddr};
use vspace::paging::TableLevel;
pub trait PhysAddrPaging {
const PG_OFFSET: usize;
const PPN_BITS: usize;
const PPN_OFFSET: usize;
const PA_PPN_MASK: usize = MASK!(Self::PPN_BITS) << Self::PG_OFFSET;
const PTE_PPN_MASK: usize = MASK!(Self::PPN_BITS) << Self::PPN_OFFSET;
fn to_ppn(&self) -> usize
where Self: AddressOps {
(self.as_usize() & Self::PA_PPN_MASK) >> Self::PG_OFFSET
}
fn to_ppn_shifted(&self) -> usize
where Self: PhysAddrPaging + AddressOps {
self.to_ppn() << Self::PPN_OFFSET
}
fn from_pte(pte: usize) -> PhysAddr {
let ppn = (pte & Self::PTE_PPN_MASK) >> Self::PPN_OFFSET;
let paddr = ppn << Self::PG_OFFSET;
PhysAddr::from(paddr)
}
fn merge_pte(&self, pte: usize) -> usize
where Self: PhysAddrPaging + AddressOps {
let ppn = self.to_ppn_shifted();
(pte & !Self::PTE_PPN_MASK) | ppn
}
}
#[cfg(feature = "riscv.pagetable.sv39")]
impl PhysAddrPaging for PhysAddr {
const PG_OFFSET: usize = 12;
const PPN_BITS: usize = 44;
const PPN_OFFSET: usize = 10;
}
pub trait VirtAddrPaging {
const PG_OFFSET: usize;
const VPN_BITS: usize;
const VPN_MASK: usize = (1 << Self::VPN_BITS) - 1;
fn to_vpn(&self, level: TableLevel) -> usize
where Self: AddressOps {
self.as_usize() >> (Self::PG_OFFSET + Self::VPN_BITS * level.to_usize().unwrap()) & Self::VPN_MASK
}
fn merge_vpn(&self, vpn: usize, size: TableLevel) -> VirtAddr
where Self: AddressOps {
let shift = Self::PG_OFFSET + Self::VPN_BITS * size.to_usize().unwrap();
let mask = Self::VPN_MASK << shift;
VirtAddr((self.as_usize() & !mask) | ((vpn & Self::VPN_MASK) << shift))
}
fn lower_bits(&self, level: usize) -> usize
where Self: AddressOps {
self.as_usize() & MASK!(Self::PG_OFFSET + Self::VPN_BITS * (level + 1))
}
}
#[cfg(feature = "riscv.pagetable.sv39")]
impl VirtAddrPaging for VirtAddr {
const PG_OFFSET: usize = 12;
const VPN_BITS: usize = 9;
}
pub trait TableLevelSize {
fn level_size(&self) -> usize;
fn align<A: AddressOps>(&self, addr: A) -> A {
addr.align_down(self.level_size())
}
fn is_aligned<A: AddressOps>(&self, addr: A) -> bool {
self.align(addr) == addr
}
}
impl TableLevelSize for TableLevel {
fn level_size(&self) -> usize {
match self {
Self::Level0 => 4 * KIB,
Self::Level1 => 2 * MIB,
Self::Level2 => 1 * GIB,
Self::Level3 => 512 * GIB,
Self::Level4 => 256 * TIB,
}
}
}

View File

@ -1,24 +1,16 @@
use super::table::Table;
use super::traits::{PhysAddrPaging, TableLevelSize};
use crate::arch::layout::*;
use core::panic;
use crate::{arch::layout::*, vspace::*};
use allocator::RamBlock;
use log::{debug, info};
use spin::Mutex;
use utils::size::GIB;
use vspace::addr::*;
use vspace::paging::PageError::*;
use vspace::paging::*;
use utils::{addr::*, size::GIB};
#[thread_local]
static KERNEL_PAGETABLE: Mutex<Option<&mut Table>> = Mutex::new(None);
static KERNEL_PAGETABLE: Mutex<Option<Table<Level0>>> = Mutex::new(None);
pub static KERNEL_ALLOCATOR: Mutex<RamBlock<8>> = Mutex::new(RamBlock::new());
#[inline]
fn alloc_page() -> PhysAddr {
KERNEL_ALLOCATOR.lock().alloc(PAGE_LAYOUT).expect("Failed to allocate page")
}
pub unsafe fn setup_memory(fdt_addr: usize) {
info!("Setting up memory");
let fdt = unsafe { fdt::Fdt::from_ptr(fdt_addr as *const u8).unwrap() };
@ -76,58 +68,74 @@ pub unsafe fn setup_memory(fdt_addr: usize) {
mem.reserve(fdt_addr, fdt_size);
}
#[inline]
fn map_one(pt: &mut Table, vaddr: VirtAddr, paddr: PhysAddr, attr: MapAttr, level: TableLevel) -> PageResult<()> {
loop {
let ret = pt.map(vaddr, paddr, attr, level);
if let Err(MissingEntry(missed_level)) = ret {
pt.map(missed_level.align(vaddr), alloc_page(), MapAttr::PAGE_TABLE, missed_level)?;
continue;
}
return ret;
fn alloc_page() -> PhysAddr {
let addr = KERNEL_ALLOCATOR.lock().alloc(PAGE_LAYOUT).expect("Failed to allocate page");
unsafe {
// zero page
let vaddr = mmap_phys_to_virt(addr);
core::slice::from_raw_parts_mut(vaddr.as_mut_ptr::<u8>(), PAGE_SIZE).fill(core::mem::zeroed());
}
addr
}
#[inline]
fn map_range(pt: &mut Table, from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) -> PageResult<()> {
fn map_range(pt: &mut Table<Level0>, from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
let mut virt_start = from;
let mut phys_start = to;
let virt_end = from + size;
let mut map_level = Table::MAX_PAGE_SIZE;
debug!("Mapping physical memory:\t[{:X?}, {:X?}]", virt_start, virt_end,);
macro_rules! safe_map {
($pt:expr, $level_size:expr, $virt_start:expr, $virt_end:expr, $phys_start:expr, $attr:expr) => {
if $virt_start.is_aligned($level_size) && $virt_start + $level_size <= $virt_end {
match $pt.map($virt_start, $phys_start, $attr) {
Ok(()) => {
// map success, goto next region
virt_start += $level_size;
phys_start += $level_size;
continue;
},
Err(PageError::NotAligned) => panic!("level_fits check failed"),
Err(PageError::MissingEntry) => panic!("Err(MissingEntry) never happens in Table::map"),
Err(PageError::AlreadyMapped) => panic!("page already mapped"),
}
}
};
}
macro_rules! map_pagetable {
($pt:expr, $level:ty, $addr:expr) => {
unsafe {
Table::<$level>::new(mmap_phys_to_virt(
$pt.lookup_mut($addr).map(|e| e.paddr()).unwrap_or_else(|| {
let page = alloc_page();
let addr = $addr.align_down(<$level as TableLevel>::PREVIOUS::LEVEL_SIZE);
debug!("Creating new pagetable:\t{:?} -> {:?}", addr, page);
$pt.map(addr, page, MapAttr::PAGE_TABLE).unwrap();
page
}),
))
}
};
}
while virt_start < virt_end {
let ret = map_one(pt, virt_start, phys_start, attr, map_level);
safe_map!(pt, Level0::LEVEL_SIZE, virt_start, virt_end, phys_start, attr);
if ret.is_ok() {
// map success, move to next region
virt_start += map_level.level_size();
phys_start += map_level.level_size();
let mut l1_pt = map_pagetable!(pt, Level1, virt_start);
safe_map!(l1_pt, Level1::LEVEL_SIZE, virt_start, virt_end, phys_start, attr);
// check whether we could raise the level
if let Some(prv) = map_level.previous()
&& prv.is_aligned(phys_start)
{
map_level = prv;
}
continue;
}
let mut l2_pt = map_pagetable!(l1_pt, Level2, virt_start);
safe_map!(l2_pt, Level2::LEVEL_SIZE, virt_start, virt_end, phys_start, attr);
// already mapped, try smaller level
match map_level.next() {
Some(next) => map_level = next,
None => return ret,
panic!("Failed to map memory:\t[{:?}, {:?}]\n{:?}", virt_start, virt_end, pt);
}
}
Ok(())
}
pub unsafe fn map(from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
let mut guard = KERNEL_PAGETABLE.lock();
let pt = guard.as_mut().unwrap();
map_range(pt, from, to, size, attr).expect("Failed to map memory");
map_range(pt, from, to, size, attr);
}
#[inline]
@ -135,28 +143,29 @@ pub fn is_kernel_pagetable_installed() -> bool {
KERNEL_PAGETABLE.lock().is_some()
}
pub fn page_table_mode() -> riscv::register::satp::Mode {
#[cfg(feature = "riscv.pagetable.sv39")]
return riscv::register::satp::Mode::Sv39;
#[cfg(feature = "riscv.pagetable.sv48")]
return riscv::register::satp::Mode::Sv48;
#[cfg(feature = "riscv.pagetable.sv57")]
return riscv::register::satp::Mode::Sv57;
}
pub unsafe fn setup_kernel_paging() {
info!("Setting up kernel paging");
assert!(!is_kernel_pagetable_installed(), "Kernel pagetable already installed");
let root_pt = alloc_page();
let kernel_pt = Table::new(root_pt.as_usize().into());
let mut kernel_pt = Table::<Level0>::new(root_pt.as_usize().into());
macro_rules! map_section {
(($($section:ident),+),$attr:expr) => {
$(
let start = concat_idents!($section, _START).as_virt_addr();
let end = concat_idents!($section, _END).as_virt_addr();
debug!("Mapping section {}:\t[{:X?}, {:X?}]", stringify!($section), start, end);
for addr in (start..end).step_by(PAGE_SIZE) {
let _ = map_one(kernel_pt, addr, kernel_virt_to_phys(addr), $attr, TableLevel::Level0).is_err_and(|_| {
panic!(
"Failed to map section: {:X?} - {:X?}",
concat_idents!($section, _START).as_virt_addr(),
concat_idents!($section, _END).as_virt_addr()
);
});
}
let start = concat_idents!($section, _START).as_virt_addr().align_down(PAGE_SIZE);
let end = concat_idents!($section, _END).as_virt_addr().align_up(PAGE_SIZE);
debug!("Mapping section {}:\t[{:?}, {:?}]", stringify!($section), start, end);
map_range(&mut kernel_pt, start, kernel_virt_to_phys(start), (end - start).as_usize(), $attr);
)+
};
}
@ -171,19 +180,18 @@ pub unsafe fn setup_kernel_paging() {
// map 4 GiB physical memory
// TODO: walk fdt to get all memory region? put it in driver init
map_range(
kernel_pt,
&mut kernel_pt,
mmap_phys_to_virt(PhysAddr(0)),
PhysAddr(0),
3 * GIB - 1 + GIB,
4 * GIB, // NOTE: will fail on 32-bit system
MapAttr::READABLE | MapAttr::WRITABLE,
)
.expect("Failed to map physical memory");
);
// setup new pagetable
debug!("Setting up new kernel pagetable");
riscv::register::satp::set(Table::mode(), 0, root_pt.to_ppn());
riscv::register::satp::set(page_table_mode(), 0, root_pt.extract_ppn());
riscv::asm::sfence_vma_all();
// switch to virtual address
*KERNEL_PAGETABLE.lock() = Some(Table::new(mmap_phys_to_virt(root_pt)));
*KERNEL_PAGETABLE.lock() = Some(kernel_pt);
}

View File

@ -4,7 +4,7 @@ use crate::drivers::Driver;
use crate::plat::console::ConsoleDevice;
use core::sync::atomic::{AtomicPtr, Ordering};
use fdt::node::FdtNode;
use vspace::addr::PhysAddr;
use utils::addr::PhysAddr;
// https://static.dev.sifive.com/FU540-C000-v1.0.pdf

View File

@ -4,7 +4,7 @@ use crate::drivers::Driver;
use crate::plat::console::ConsoleDevice;
use fdt::node::FdtNode;
use uart_16550::MmioSerialPort;
use vspace::addr::PhysAddr;
use utils::addr::PhysAddr;
pub struct Uart16550
where Uart16550: SerialDriver

View File

@ -25,9 +25,10 @@ mod drivers;
mod entry;
mod lang;
mod logging;
mod objects;
// mod objects;
mod plat;
mod scheduler;
mod vspace;
// test infrastructure
#[cfg(test)]

View File

@ -1,11 +1,11 @@
use crate::objects::null::NullCap;
use core::cell::Cell;
use uapi::cap::ObjectType;
use utils::addr::PhysAddr;
use utils::{
linked_list::{Link, LinkHelper},
LinkHelperImpl,
};
use vspace::addr::PhysAddr;
/// RawCap is the specific implementation of capability which stores in CNode
#[derive(Copy, Clone, Default, PartialEq, Eq)]

View File

@ -4,8 +4,8 @@ use super::{
};
use crate::arch::layout::mmap_phys_to_virt;
use uapi::{cap::ObjectType, error::CapFault};
use utils::addr::{AddressOps, PhysAddr};
use utils::MASK;
use vspace::addr::{AddressOps, PhysAddr};
/// CNodeObject is a array of Capabilities (`RawCap`)
/// The size of the array is stored in CNodeCap

View File

@ -1,7 +1,7 @@
use super::cap::RawCap;
use super::{Cap, KernelObject};
use uapi::cap::ObjectType;
use vspace::addr::PhysAddr;
use utils::addr::PhysAddr;
/// NullObject is used as empty (capability) slot
pub struct NullObject {}

View File

@ -4,9 +4,9 @@ use super::null::NullCap;
use super::{Cap, KernelObject};
use uapi::cap::ObjectType;
use uapi::error::{SysError, SysResult};
use utils::addr::{align_up, PhysAddr};
use utils::then::Then;
use utils::MASK;
use vspace::addr::{align_up, PhysAddr};
/// UntypedObject is used as raw memory (associated with PhysAddr)
/// It can be further retyped to other objects (TCB, CNode ...)

View File

@ -1,5 +1,5 @@
use log::error;
use vspace::addr::VirtAddr;
use utils::addr::VirtAddr;
#[derive(Clone, Copy)]
pub struct FrameWalker {

38
kernel/src/vspace/addr.rs Normal file
View File

@ -0,0 +1,38 @@
use super::TableLevel;
use utils::addr::{AddressOps, PhysAddr, VirtAddr};
/// Architecture-specific physical-address <-> PTE conversions.
pub trait PhysAddrPage: AddressOps {
    /// Extracts the Physical Page Number (PPN).
    fn extract_ppn(&self) -> usize;
    /// Same as [`PhysAddrPage::extract_ppn`], but the PPN remains shifted into
    /// its PTE position.
    fn extract_ppn_shifted(&self) -> usize;
    /// Converts a PTE into a Physical Address.
    fn from_pte(pte: usize) -> Self;
    /// Merges this address's Physical Page Number (PPN) into the given Page
    /// Table Entry (PTE), returning the new PTE.
    fn update_pte(&self, pte: usize) -> usize;
}
/// Architecture-specific virtual-address <-> VPN conversions.
pub trait VirtAddrPage: AddressOps {
    /// Extracts the Virtual Page Number (VPN) for page-table level `T`.
    fn extract_vpn<T: TableLevel>(&self) -> usize;
    /// Merges `vpn` into this address at level `T`'s bit position.
    fn merge_vpn<T: TableLevel>(&self, vpn: usize) -> usize;
}
assert_impl_all!(PhysAddr: PhysAddrPage);
assert_impl_all!(VirtAddr: VirtAddrPage);

View File

@ -1,6 +1,6 @@
use crate::addr::PhysAddr;
use bitflags::bitflags;
use core::fmt::Debug;
use utils::addr::{PhysAddr, VirtAddr};
bitflags! {
#[derive(Debug, Copy, Clone)]
@ -17,10 +17,11 @@ pub trait EntryOps: Clone + Copy + Debug {
fn new_page(phys_addr: PhysAddr, attr: MapAttr) -> Self;
fn new_table(phys_addr: PhysAddr) -> Self;
fn addr(&self) -> PhysAddr;
fn paddr(&self) -> PhysAddr;
fn vaddr(&self) -> VirtAddr;
fn attr(&self) -> MapAttr;
fn set_addr(&mut self, addr: PhysAddr);
fn set_paddr(&mut self, addr: PhysAddr);
fn set_attr(&mut self, attr: MapAttr);
fn is_valid(&self) -> bool;

View File

@ -0,0 +1,47 @@
use super::EntryOps;
// Zero-sized marker types naming each page-table level (Level0 = root/largest).
// The `where Self: TableLevel` bound keeps a marker unusable until an arch
// module actually implements `TableLevel` for it.
pub struct Level4
where Self: TableLevel;
pub struct Level3
where Self: TableLevel;
pub struct Level2
where Self: TableLevel;
pub struct Level1
where Self: TableLevel;
pub struct Level0
where Self: TableLevel;
// Sentinel level used to terminate the NEXT/PREVIOUS chain.
pub struct LevelInvalid
where Self: TableLevel;
/// `TableLevel` describes a specific level of a page table.
pub trait TableLevel {
    /// `LEVEL` represents the level of the page table.
    /// Level 0 is the largest page.
    const LEVEL: usize;
    /// `LEVEL_SIZE` represents the maximum address that this level of the page table can map (assume Huge Page).
    const LEVEL_SIZE: usize;
    /// `LEVEL_BITS` represents the vaddr bits that this page table level can map.
    const LEVEL_BITS: usize;
    /// `ENTRIES` represents the number of entries in this level of the page table.
    const ENTRIES: usize;
    /// `Entry` represents the type of this level's page table entry.
    type Entry: EntryOps;
    /// `NEXT` represents the next level (smaller) of the page table.
    type NEXT: TableLevel;
    /// `PREVIOUS` represents the previous level (larger) of the page table.
    type PREVIOUS: TableLevel;
    /// `DUMMY` represents that current level is invalid.
    const DUMMY: bool = false;
}

View File

@ -1,5 +1,9 @@
mod addr;
mod entry;
mod level;
mod table;
pub use addr::*;
pub use entry::*;
pub use level::*;
pub use table::*;

View File

@ -0,0 +1,34 @@
use super::{MapAttr, TableLevel};
use core::fmt::Debug;
use utils::addr::{PhysAddr, VirtAddr};
/// Errors returned by page-table operations.
#[derive(Debug)]
pub enum PageError {
    /// The target slot already holds a valid entry.
    AlreadyMapped,
    /// No valid entry exists at the resolved slot.
    MissingEntry,
    /// An address was not aligned to the required level/page size.
    NotAligned,
}
/// Convenience alias; defaults to `Result<(), PageError>`.
pub type PageResult<T = ()> = Result<T, PageError>;
/// Operations available on a single page table of level `T`.
pub trait TableOps<'a, T: TableLevel> {
    /// # Safety
    /// `location` must be a page-aligned virtual address and will not be dropped.
    unsafe fn new(location: VirtAddr) -> Self;
    // the following methods only operate at the current level (no table walking)
    fn map(&mut self, from: VirtAddr, to: PhysAddr, attr: MapAttr) -> PageResult;
    fn unmap(&mut self, vaddr: VirtAddr) -> PageResult;
    fn lookup(&mut self, vaddr: VirtAddr) -> Option<&T::Entry>;
    fn lookup_mut(&mut self, vaddr: VirtAddr) -> Option<&mut T::Entry>;
    /// Reinterprets this table's backing memory as a table of level `V`.
    fn retype<V: TableLevel>(&mut self) -> Table<'a, V>;
}
/// A borrowed view over one level-`T` page table's entry array.
pub struct Table<'a, T: TableLevel>
where Self: TableOps<'a, T>
{
    pub entries: &'a mut [T::Entry],
}

View File

@ -1,10 +1,9 @@
[package]
name = "allocator"
version = "0.1.0"
version = "0.1.1"
edition = "2021"
[dependencies]
vspace = { path = "../vspace" }
utils = { path = "../utils" }
spin = "0.9"

View File

@ -1,7 +1,7 @@
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::null_mut;
use spin::Mutex;
use vspace::addr::{AddressOps, PhysAddr};
use utils::addr::{AddressOps, PhysAddr};
pub trait BitmapCfg: Copy + Clone {
const CAPACITY: usize;

View File

@ -1,6 +1,6 @@
use core::alloc::Layout;
use core::cmp::min;
use vspace::addr::{AddressOps, PhysAddr};
use utils::addr::{AddressOps, PhysAddr};
#[derive(Copy, Clone, Debug)]
struct Block {

View File

@ -3,8 +3,8 @@
use core::alloc::{GlobalAlloc, Layout};
use core::fmt::Debug;
use spin::Mutex;
use utils::addr::{AddressOps, PhysAddr};
use utils::then::Then;
use vspace::addr::{AddressOps, PhysAddr};
struct ListNode {
size: usize,

View File

@ -1,11 +1,8 @@
[package]
name = "utils"
version = "0.1.0"
version = "0.1.1"
edition = "2021"
[features]
default = []
legacy = []
[dependencies]
vspace = { path = "../vspace" }

View File

@ -1,4 +1,4 @@
use vspace::addr::VirtAddr;
use crate::addr::VirtAddr;
extern "C" {
pub type ExternSymbol;

View File

@ -1,6 +1,8 @@
#![no_std]
#![feature(extern_types)]
#![feature(step_trait)]
pub mod addr;
pub mod assert;
pub mod atomic;
pub mod bin;

View File

@ -1,13 +0,0 @@
[package]
name = "vspace"
version = "0.1.0"
edition = "2021"
[features]
default = []
legacy = []
[dependencies]
bitflags = "2.4"
num-derive = "0.4"
num-traits = { version = "0.2", default-features = false }

View File

@ -1,9 +0,0 @@
#![cfg_attr(not(test), no_std)]
#![feature(const_mut_refs)]
#![feature(step_trait)]
#[macro_use]
extern crate num_derive;
pub mod addr;
pub mod paging;

View File

@ -1,60 +0,0 @@
use super::{EntryOps, MapAttr};
use crate::addr::{PhysAddr, VirtAddr};
use core::fmt::Debug;
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, FromPrimitive, ToPrimitive)]
pub enum TableLevel {
Level0 = 0, // KiloPage
Level1 = 1, // MegaPage
Level2 = 2, // GigaPage
Level3 = 3, // TeraPage
Level4 = 4, // PetaPage
}
impl TableLevel {
pub fn next(&self) -> Option<Self> {
match self {
Self::Level0 => None,
Self::Level1 => Some(Self::Level0),
Self::Level2 => Some(Self::Level1),
Self::Level3 => Some(Self::Level2),
Self::Level4 => Some(Self::Level3),
}
}
pub fn previous(&self) -> Option<Self> {
match self {
Self::Level0 => Some(Self::Level1),
Self::Level1 => Some(Self::Level2),
Self::Level2 => Some(Self::Level3),
Self::Level3 => Some(Self::Level4),
Self::Level4 => None,
}
}
}
#[derive(Debug)]
pub enum PageError {
AlreadyMapped(TableLevel),
MissingEntry(TableLevel),
}
pub type PageResult<T = ()> = Result<T, PageError>;
pub trait TableOps: Debug {
type Entry: EntryOps;
const MAX_PAGE_SIZE: TableLevel;
const TABLE_SIZE: usize;
/// # Safety
/// `location` must be a page-aligned virtual address and will not be dropped.
unsafe fn new(location: VirtAddr) -> &'static mut Self;
fn map(&mut self, from: VirtAddr, to: PhysAddr, attr: MapAttr, level: TableLevel) -> PageResult;
fn unmap(&mut self, vaddr: VirtAddr) -> PageResult;
fn lookup(&mut self, vaddr: VirtAddr) -> Option<&Self::Entry>;
fn lookup_mut(&mut self, vaddr: VirtAddr) -> Option<&mut Self::Entry>;
fn translate(&mut self, vaddr: VirtAddr) -> Option<PhysAddr>;
}

View File

@ -11,6 +11,5 @@ condense_wildcard_suffixes = true
format_macro_matchers = true
match_arm_leading_pipes = "Preserve"
match_block_trailing_comma = true
reorder_impl_items = true
use_try_shorthand = true
where_single_line = true

View File

@ -4,6 +4,5 @@ version = "0.1.0"
edition = "2021"
[dependencies]
vspace = { path = "../lib/vspace" }
num-traits = { version = "0.2", default-features = false }
num-derive = "0.4"

View File

@ -1,5 +1,3 @@
pub use vspace::paging::MapAttr;
// TODO: Only support leaf page for now, no huge page support!
pub const FRAME_SIZE: usize = 4096;