feat: add initial vspace support

This commit is contained in:
Paul Pan 2024-03-26 00:05:55 +08:00
parent 77d02a128b
commit c899797fa0
13 changed files with 475 additions and 11 deletions

3
kernel/Cargo.lock generated
View File

@ -239,6 +239,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "vspace"
version = "0.1.0"
dependencies = [
"bitflags 2.5.0",
]
[[package]]
name = "x86"

View File

@ -8,7 +8,8 @@ edition = "2021"
[features]
default = ["arch_riscv64", "board_virt", "log_color"]
arch_riscv64 = []
arch_riscv64 = ["vspace/riscv_sv39"]
# TODO: riscv32 not supported yet
arch_riscv32 = []
board_default = []
@ -25,17 +26,17 @@ lto = "thin"
[dependencies]
api = { path = "../api" }
vspace = { path = "../lib/vspace" }
vspace = { path = "../lib/vspace", default-features = false }
bitflags = "2.4.2"
cfg-if = "1.0.0"
bitflags = "2.4"
cfg-if = "1.0"
fdt = "0.1"
lazy_static = { version = "1.4.0", features = ["spin_no_std"] }
lazy_static = { version = "1.4", features = ["spin_no_std"] }
log = "0.4"
num-derive = "0.4"
num-traits = { version = "0.2", default-features = false }
riscv = { version = "0.11.1", features = ["s-mode"] }
sbi-rt = { version = "0.0.3" }
spin = "0.9.8"
static_assertions = "1.1.0"
riscv = { version = "0.11", features = ["s-mode"] }
sbi-rt = { version = "0.0" }
spin = "0.9"
static_assertions = "1.1"
uart_16550 = "0.3"

9
lib/vspace/Cargo.lock generated
View File

@ -2,6 +2,15 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "bitflags"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
[[package]]
name = "vspace"
version = "0.1.0"
dependencies = [
"bitflags",
]

View File

@ -3,4 +3,11 @@ name = "vspace"
version = "0.1.0"
edition = "2021"
[features]
default = ["arch_riscv", "riscv_sv39"]
arch_riscv = []
riscv_sv39 = ["arch_riscv"]
[dependencies]
bitflags = "2.4"

View File

@ -13,10 +13,10 @@ pub fn align_down(addr: usize, align: usize) -> usize {
addr & !(align - 1)
}
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Default, PartialOrd, PartialEq)]
pub struct PhysAddr(pub usize);
#[derive(Copy, Clone, PartialOrd, PartialEq)]
#[derive(Copy, Clone, Default, PartialOrd, PartialEq)]
pub struct VirtAddr(pub usize);
pub trait AddressOps {

View File

@ -1,3 +1,4 @@
#![no_std]
pub mod addr;
pub mod paging;

View File

@ -0,0 +1,7 @@
// Re-export the selected architecture backend as this module's contents.
pub use arch::*;

// Arch Level
// Exactly one `arch_*` feature is expected to be enabled; it selects which
// directory provides the `arch` module.
#[cfg(feature = "arch_riscv")]
#[path = "riscv/mod.rs"]
#[allow(clippy::module_inception)]
mod arch;

View File

@ -0,0 +1,158 @@
use crate::addr::{AddressOps, PhysAddr};
use crate::paging::{MapAttr, PageTableEntryOps};
use bitflags::bitflags;
bitflags! {
    /// Raw RISC-V page-table-entry flag bits (the low bits of a PTE).
    #[derive(Debug)]
    pub struct PTEFlags : u64 {
        /// Entry participates in address translation.
        const VALID = 1 << 0;
        /// Page may be read.
        const READABLE = 1 << 1;
        /// Page may be written.
        const WRITABLE = 1 << 2;
        /// Page may be executed.
        const EXECUTABLE = 1 << 3;
        /// Page is accessible in U-mode.
        const USER_ACCESSIBLE = 1 << 4;
        /// Mapping exists in all address spaces.
        const GLOBAL = 1 << 5;
        /// Accessed bit (A).
        const ACCESSED = 1 << 6;
        /// Dirty bit (D).
        const DIRTY = 1 << 7;
        /// Two bits reserved for software use.
        const RSW = 1 << 8 | 1 << 9;
    }
}
impl From<PTEFlags> for MapAttr {
    /// Decode hardware PTE flag bits into the arch-neutral `MapAttr`.
    fn from(flags: PTEFlags) -> Self {
        let mut attr = Self::empty();
        if flags.contains(PTEFlags::READABLE) {
            attr.insert(Self::READABLE);
        }
        if flags.contains(PTEFlags::WRITABLE) {
            attr.insert(Self::WRITABLE);
        }
        if flags.contains(PTEFlags::EXECUTABLE) {
            attr.insert(Self::EXECUTABLE);
        }
        if flags.contains(PTEFlags::USER_ACCESSIBLE) {
            attr.insert(Self::USER_ACCESSIBLE);
        }
        // RISC-V: a valid entry with R=W=X all clear points to a next-level
        // table. `intersects` (any bit set) is required here: the previous
        // `contains(R | W | X)` only tested whether *all three* bits were
        // present, so a valid read-only leaf (V+R) was misclassified as a
        // page table.
        if flags.contains(PTEFlags::VALID)
            && !flags.intersects(PTEFlags::READABLE | PTEFlags::WRITABLE | PTEFlags::EXECUTABLE)
        {
            attr.insert(Self::PAGE_TABLE);
        }
        attr
    }
}
impl From<MapAttr> for PTEFlags {
    /// Encode arch-neutral attributes into raw PTE flag bits.
    fn from(attr: MapAttr) -> Self {
        // No attributes at all encodes a cleared (invalid) entry.
        if attr.is_empty() {
            return Self::empty();
        }

        // Any non-empty attribute set produces a valid entry.
        let mut flags = Self::VALID;
        if attr.contains(MapAttr::USER_ACCESSIBLE) {
            flags |= Self::USER_ACCESSIBLE;
        }

        // PAGE_TABLE <=> RWX.empty()
        if attr.contains(MapAttr::PAGE_TABLE) {
            return flags;
        }

        // Leaf entry: translate each permission bit one-to-one.
        for (from, to) in [
            (MapAttr::READABLE, Self::READABLE),
            (MapAttr::WRITABLE, Self::WRITABLE),
            (MapAttr::EXECUTABLE, Self::EXECUTABLE),
        ] {
            if attr.contains(from) {
                flags |= to;
            }
        }
        flags
    }
}
// Sv39 physical-address / PTE field layout.
#[cfg(feature = "riscv_sv39")]
impl PhysAddr {
    /// Mask selecting the PPN portion of a physical address (bits 12..56).
    const PA_PPN_MASK: u64 = ((1 << Self::PPN_BITS) - 1) << Self::PG_OFFSET;
    /// Page-offset width: 4 KiB base pages.
    const PG_OFFSET: u64 = 12;
    /// Width of the PPN field (44 bits in Sv39).
    const PPN_BITS: u64 = 44;
    /// Bit position of the PPN field inside a PTE.
    const PPN_OFFSET: u64 = 10;
    /// Mask selecting the PPN field inside a PTE (bits 10..54).
    const PTE_PPN_MASK: u64 = ((1 << Self::PPN_BITS) - 1) << Self::PPN_OFFSET;
}
// NOTE(review): these helpers use the `riscv_sv39`-gated constants above,
// but this impl block itself carries no #[cfg] — confirm whether it should
// be gated the same way (it will not compile with the feature disabled).
impl PhysAddr {
    /// PPN of this address, pre-shifted into its PTE position (bits 10..54).
    fn to_ppn_shifted(self) -> u64 {
        ((self.as_u64() & Self::PA_PPN_MASK) >> Self::PG_OFFSET) << Self::PPN_OFFSET
    }
    /// Extract the physical address a PTE points at.
    fn from_pte(pte: u64) -> Self {
        let ppn = (pte & Self::PTE_PPN_MASK) >> Self::PPN_OFFSET;
        let paddr = ppn << Self::PG_OFFSET;
        PhysAddr::from(paddr as usize)
    }
    /// Write this address's PPN into `pte`, preserving all non-PPN bits.
    fn merge_pte(self, pte: u64) -> u64 {
        let ppn = self.to_ppn_shifted();
        (pte & !Self::PTE_PPN_MASK) | ppn
    }
}
/// One raw page-table entry: PPN in bits 10..54, flag bits in the low bits.
/// `Default` yields an all-zero (invalid) entry.
#[derive(Clone, Copy, Default)]
pub struct PageTableEntry(u64);
impl PageTableEntryOps for PageTableEntry {
    /// Construct a leaf entry mapping `paddr` with permissions from `attr`.
    fn new_page(paddr: PhysAddr, attr: MapAttr) -> Self {
        Self(paddr.to_ppn_shifted() | PTEFlags::from(attr).bits())
    }

    /// Construct a non-leaf entry pointing at the next-level table at `paddr`.
    fn new_table(paddr: PhysAddr) -> Self {
        Self(paddr.to_ppn_shifted() | PTEFlags::VALID.bits())
    }

    /// Physical address stored in the PPN field.
    fn addr(&self) -> PhysAddr {
        PhysAddr::from_pte(self.0)
    }

    /// Arch-neutral view of the flag bits.
    fn attr(&self) -> MapAttr {
        PTEFlags::from_bits_truncate(self.0).into()
    }

    /// Replace the PPN field, leaving the flag bits untouched.
    fn set_addr(&mut self, addr: PhysAddr) {
        self.0 = addr.merge_pte(self.0);
    }

    /// Replace all flag bits, leaving the PPN field untouched.
    fn set_attr(&mut self, attr: MapAttr) {
        let without_flags = self.0 & !PTEFlags::all().bits();
        self.0 = without_flags | PTEFlags::from(attr).bits();
    }

    /// The entry takes part in translation iff its VALID bit is set.
    fn is_valid(&self) -> bool {
        PTEFlags::from_bits_truncate(self.0).contains(PTEFlags::VALID)
    }

    /// A valid entry with any of R/W/X set maps a page directly.
    fn is_leaf(&self) -> bool {
        let flags = PTEFlags::from_bits_truncate(self.0);
        flags.contains(PTEFlags::VALID)
            && flags.intersects(PTEFlags::READABLE | PTEFlags::WRITABLE | PTEFlags::EXECUTABLE)
    }
}
impl core::fmt::Debug for PageTableEntry {
    /// Render the entry as its decoded address plus raw flag bits.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        let flags = PTEFlags::from_bits_truncate(self.0);
        f.debug_struct("PageTableEntry")
            .field("addr", &self.addr())
            .field("flag", &flags)
            .finish()
    }
}

View File

@ -0,0 +1,5 @@
mod entry;
mod table;
pub use entry::PageTableEntry;
pub use table::PageTable;

View File

@ -0,0 +1,175 @@
use crate::addr::{AddressOps, PhysAddr, VirtAddr};
use crate::paging::{
MapAttr, PageError, PageResult, PageSize, PageTableEntry, PageTableEntryOps, PageTableOps,
};
/// Smallest mapping granularity; all map targets must be 4 KiB aligned.
const PAGE_SIZE: usize = 4096;

// Sv39 virtual-address layout: 9-bit VPN indices above a 12-bit page offset.
#[cfg(feature = "riscv_sv39")]
impl VirtAddr {
    /// Page-offset width (4 KiB base pages).
    const PG_OFFSET: usize = 12;
    /// Width of one VPN index (512 entries per table).
    const VPN_BITS: usize = 9;
    /// Mask for a single VPN index.
    const VPN_MASK: usize = (1 << Self::VPN_BITS) - 1;
}
impl VirtAddr {
    /// Extract the 9-bit VPN index consulted at the translation level of `size`.
    fn to_vpn(self, size: PageSize) -> usize {
        let level = size.to_level();
        assert!(level <= 3, "invalid level: {}", level);
        self.0 >> (Self::PG_OFFSET + Self::VPN_BITS * level) & Self::VPN_MASK
    }
    /// Copy of `self` with the VPN index at `size`'s level replaced by `vpn`.
    fn merge_vpn(&self, vpn: usize, size: PageSize) -> Self {
        let shift = Self::PG_OFFSET + Self::VPN_BITS * size.to_level();
        let mask = Self::VPN_MASK << shift;
        VirtAddr((self.0 & !mask) | ((vpn & Self::VPN_MASK) << shift))
    }
    /// Page-offset bits of a leaf mapped at `level`:
    /// 12 bits for level 0 (4 KiB), 21 for level 1 (2 MiB), 30 for level 2 (1 GiB).
    fn lower_bits(self, level: usize) -> usize {
        // Fix: the mask previously used `level + 1`, which kept one extra
        // VPN index inside the "offset". `translate` then OR-ed vaddr bits
        // 12..21 of a 4 KiB mapping into the physical frame address,
        // producing corrupt translations for any non-zero VPN[0] bits.
        self.0 & ((1 << (Self::PG_OFFSET + Self::VPN_BITS * level)) - 1)
    }
}
/// One level of the radix page table, occupying exactly one 4 KiB page.
#[repr(C, align(4096))]
pub struct PageTable {
    // Assume at least SV39 paging
    entries: [PageTableEntry; 512],
}
impl PageTable {
    /// Walk from this table (treated as the root) towards `vaddr`.
    ///
    /// Returns the entry where the walk stopped, together with the page size
    /// of that level. The walk stops at the first entry that is invalid, is
    /// a leaf, or sits at the last level — the caller decides what each of
    /// those outcomes means.
    fn lookup_mut_internal(&mut self, vaddr: VirtAddr) -> (&mut PageTableEntry, PageSize) {
        // NOTE: we assume that this page table is the root page table
        let mut cur = Self::MAX_PAGE_SIZE;
        let mut table = self;
        loop {
            let vpn = vaddr.to_vpn(cur);
            // Copy the entry out (it is `Copy`) so it can be inspected
            // without holding a borrow on `table`.
            let entry = table.entries[vpn];
            if !entry.is_valid() || entry.is_leaf() || cur.next().is_none() {
                return (&mut table.entries[vpn], cur);
            }
            // Descend one level: smaller page size, next-level table.
            cur = cur.next().unwrap();
            table = unsafe {
                // NOTE: we assume that kernel space is 1:1 mapped
                Self::from_va(entry.addr().as_usize().into())
            };
        }
    }
}
impl PageTableOps for PageTable {
    type Entry = PageTableEntry;

    // Sv39 has three levels, so the largest leaf is a 1 GiB gigapage.
    #[cfg(feature = "riscv_sv39")]
    const MAX_PAGE_SIZE: PageSize = PageSize::Giga;

    /// Reinterpret the memory at `from` as a page table.
    ///
    /// # Safety
    /// `from` must point to a live, 4 KiB-aligned page table, and the
    /// returned `'static` reference must not be mutably aliased elsewhere.
    unsafe fn from_va(from: VirtAddr) -> &'static mut Self {
        let ptr: *mut Self = from.into();
        &mut *ptr
    }

    /// Install a mapping `from -> to` at the given page size.
    ///
    /// Intermediate tables are not created here; they must already exist.
    fn map(&mut self, from: VirtAddr, to: PhysAddr, attr: MapAttr, size: PageSize) -> PageResult {
        assert!(from.is_aligned(PAGE_SIZE));
        assert!(to.is_aligned(PAGE_SIZE));
        assert!(size.is_aligned(from.as_usize()));
        // A next-level table only needs 4 KiB alignment (checked above);
        // a leaf target must be aligned to its own page size.
        if !attr.contains(MapAttr::PAGE_TABLE) {
            assert!(size.is_aligned(to.as_usize()));
        }
        let (entry, cur) = self.lookup_mut_internal(from);
        // Walk descended past the requested level: the slot at that level
        // already holds a next-level table.
        // NOTE(review): `MissingEntry` reads oddly for this case — confirm
        // the intended error semantics.
        if cur.to_level() < size.to_level() {
            return Err(PageError::MissingEntry);
        }
        // Slot is occupied, or the walk stopped above the requested level.
        // NOTE(review): when `cur > size` with an *invalid* entry, an
        // intermediate table is missing, yet this reports `AlreadyMapped` —
        // confirm intended.
        if entry.is_valid() || cur.to_level() > size.to_level() {
            return Err(PageError::AlreadyMapped);
        }
        entry.set_addr(to);
        // `set_attr` sets the VALID bit via `PTEFlags::from(attr)`.
        entry.set_attr(attr);
        Ok(())
    }

    /// Clear the entry the walk resolves for `vaddr` (address and flags to 0).
    fn unmap(&mut self, vaddr: VirtAddr) -> PageResult {
        let (entry, _) = self.lookup_mut_internal(vaddr);
        if !entry.is_valid() {
            return Err(PageError::MissingEntry);
        }
        entry.set_addr(PhysAddr::default());
        entry.set_attr(MapAttr::empty());
        Ok(())
    }

    /// Copy of the valid entry for `vaddr`, if any.
    fn lookup(&mut self, vaddr: VirtAddr) -> Option<Self::Entry> {
        let (entry, _) = self.lookup_mut_internal(vaddr);
        entry.is_valid().then_some(*entry)
    }

    /// Mutable reference to the valid entry for `vaddr`, if any.
    fn lookup_mut(&mut self, vaddr: VirtAddr) -> Option<&mut Self::Entry> {
        let (entry, _) = self.lookup_mut_internal(vaddr);
        entry.is_valid().then_some(entry)
    }

    /// Full virtual-to-physical translation: page base from the leaf entry,
    /// OR-ed with the untranslated low bits of `vaddr`.
    // NOTE(review): verify `lower_bits` masks exactly the page-offset bits
    // for the resolved level.
    fn translate(&mut self, vaddr: VirtAddr) -> Option<PhysAddr> {
        let (entry, size) = self.lookup_mut_internal(vaddr);
        entry
            .is_valid()
            .then_some(entry.addr())
            .map(|p| p.as_usize() | vaddr.lower_bits(size.to_level()))
            .map(|p| p.into())
    }
}
impl PageTable {
    /// Recursively pretty-print every valid entry of this (sub)tree.
    ///
    /// `base` is the virtual-address prefix accumulated from the levels
    /// above; `size` is the page size handled at this level.
    fn debug_walk(
        &self,
        f: &mut core::fmt::Formatter,
        base: VirtAddr,
        size: PageSize,
    ) -> core::fmt::Result {
        // Indent one tab per level below the root.
        macro_rules! w {
            ($($arg:tt)*) => {
                for _ in size.to_level()..Self::MAX_PAGE_SIZE.to_level() {
                    write!(f, "\t")?;
                }
                writeln!(f, $($arg)*)?;
            };
        }
        // `iter()` instead of `into_iter()`: the by-value form copied the
        // entire 4 KiB entry array on every (recursive) call.
        for (i, entry) in self.entries.iter().enumerate() {
            if !entry.is_valid() {
                continue;
            }
            w!(
                "[{:>3}]: {:?} -> {:?} : {:?}",
                i,
                base.merge_vpn(i, size),
                entry.addr(),
                entry
            );
            if !entry.is_leaf() && size.next().is_some() {
                // NOTE: relies on the kernel's 1:1 mapping to reach the
                // next-level table through its physical address.
                let table = unsafe { Self::from_va(entry.addr().as_usize().into()) };
                table.debug_walk(f, base.merge_vpn(i, size), size.next().unwrap())?;
            }
        }
        Ok(())
    }
}
impl core::fmt::Debug for PageTable {
    /// Dump the whole tree, treating this table as the root.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        writeln!(f, "PageTable({:p}):", self)?;
        // The root covers the address space from VA 0 at the largest size.
        let root_base = VirtAddr(0);
        self.debug_walk(f, root_base, Self::MAX_PAGE_SIZE)
    }
}

View File

@ -0,0 +1,28 @@
use crate::addr::PhysAddr;
use bitflags::bitflags;
use core::fmt::Debug;
bitflags! {
    /// Arch-neutral mapping attributes used by the generic paging API.
    #[derive(Debug, Copy, Clone)]
    pub struct MapAttr: usize {
        /// Entry points to a next-level page table instead of a leaf page.
        const PAGE_TABLE = 1 << 0;
        /// Mapping may be read.
        const READABLE = 1 << 1;
        /// Mapping may be written.
        const WRITABLE = 1 << 2;
        /// Mapping may be executed.
        const EXECUTABLE = 1 << 3;
        /// Mapping is accessible from user mode.
        const USER_ACCESSIBLE = 1 << 4;
    }
}
/// Operations every architecture's page-table entry must provide.
pub trait PageTableEntryOps: Clone + Copy + Debug {
    /// Build a leaf entry mapping `paddr` with the given attributes.
    fn new_page(paddr: PhysAddr, attr: MapAttr) -> Self;
    /// Build a non-leaf entry pointing to the next-level table at `paddr`.
    fn new_table(paddr: PhysAddr) -> Self;
    /// Physical address this entry points at.
    fn addr(&self) -> PhysAddr;
    /// Attributes encoded in this entry.
    fn attr(&self) -> MapAttr;
    /// Replace the target address, keeping the attributes.
    fn set_addr(&mut self, addr: PhysAddr);
    /// Replace the attributes, keeping the target address.
    fn set_attr(&mut self, attr: MapAttr);
    /// Whether the entry takes part in translation.
    fn is_valid(&self) -> bool;
    /// Whether the entry maps a page directly (vs. pointing to a table).
    fn is_leaf(&self) -> bool;
}

View File

@ -0,0 +1,7 @@
mod arch;
mod entry;
mod table;
pub use arch::*;
pub use entry::*;
pub use table::*;

View File

@ -0,0 +1,63 @@
use super::{MapAttr, PageTableEntryOps};
use crate::addr::{PhysAddr, VirtAddr};
use core::fmt::Debug;
/// Leaf page sizes, one per translation level.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PageSize {
    /// 4 KiB page (level 0).
    Kilo,
    /// 2 MiB megapage (level 1).
    Mega,
    /// 1 GiB gigapage (level 2).
    Giga,
    /// 512 GiB terapage (level 3).
    Tera,
}

impl PageSize {
    /// Whether `addr` is aligned to this page size.
    ///
    /// Each level spans 512x the previous one: 4 KiB, 2 MiB, 1 GiB, 512 GiB.
    pub fn is_aligned(&self, addr: usize) -> bool {
        // 2^(12 + 9 * level). Fixes the old Tera arm, which checked against
        // 512 TiB (512 * 1024^4) instead of the 512 GiB a level-3 leaf
        // actually spans.
        let size = 1usize << (12 + 9 * self.to_level());
        addr & (size - 1) == 0
    }
    /// Translation level of a leaf of this size (0 = 4 KiB ... 3 = 512 GiB).
    pub fn to_level(&self) -> usize {
        match self {
            Self::Kilo => 0,
            Self::Mega => 1,
            Self::Giga => 2,
            Self::Tera => 3,
        }
    }
    /// The next-smaller page size, or `None` for the smallest.
    pub fn next(&self) -> Option<Self> {
        match self {
            Self::Kilo => None,
            Self::Mega => Some(Self::Kilo),
            Self::Giga => Some(Self::Mega),
            Self::Tera => Some(Self::Giga),
        }
    }
}
/// Errors returned by page-table operations.
#[derive(Debug)]
pub enum PageError {
    /// The target slot already contains a mapping.
    AlreadyMapped,
    /// A required entry is absent.
    MissingEntry,
}

/// Paging operations default to `Result<(), PageError>`.
pub type PageResult<T = ()> = Result<T, PageError>;
/// Operations of a root page table.
pub trait PageTableOps: Debug {
    /// Concrete entry type for this architecture.
    type Entry: PageTableEntryOps;
    /// Largest supported leaf page size (the root level's size).
    const MAX_PAGE_SIZE: PageSize;
    /// Reinterpret the memory at `from` as a page table.
    ///
    /// # Safety
    /// `from` must point to a live, properly aligned page table, and the
    /// returned reference must not be mutably aliased elsewhere.
    unsafe fn from_va(from: VirtAddr) -> &'static mut Self;
    /// Map `from` -> `to` with attributes `attr` at page size `size`.
    fn map(&mut self, from: VirtAddr, to: PhysAddr, attr: MapAttr, size: PageSize) -> PageResult;
    /// Remove the mapping that currently translates `vaddr`.
    fn unmap(&mut self, vaddr: VirtAddr) -> PageResult;
    /// Copy of the valid entry for `vaddr`, if any.
    fn lookup(&mut self, vaddr: VirtAddr) -> Option<Self::Entry>;
    /// Mutable reference to the valid entry for `vaddr`, if any.
    fn lookup_mut(&mut self, vaddr: VirtAddr) -> Option<&mut Self::Entry>;
    /// Full virtual-to-physical translation of `vaddr`, if mapped.
    fn translate(&mut self, vaddr: VirtAddr) -> Option<PhysAddr>;
}