feat: objects: adapt to new vspace

Paul Pan 2024-05-19 17:07:29 +08:00
parent 36b061d9a1
commit 7d5875cb0c
3 changed files with 31 additions and 51 deletions


@@ -25,7 +25,7 @@ mod drivers;
 mod entry;
 mod lang;
 mod logging;
-// mod objects;
+mod objects;
 mod plat;
 mod scheduler;
 mod vspace;


@@ -1,20 +1,10 @@
 use super::{cap::RawCap, Cap, KernelObject};
-use crate::{
-    arch::{
-        layout::mmap_phys_to_virt,
-        vspace::{Table, TableLevelSize},
-    },
-    objects::cap::CapEntry,
-};
+use crate::{arch::layout::mmap_phys_to_virt, objects::cap::CapEntry, vspace::*};
 use uapi::{
     cap::ObjectType,
     error::{SysError, SysResult},
 };
-use utils::MASK;
-use vspace::{
-    addr::{AddressOps, PhysAddr, VirtAddr},
-    paging::{EntryOps, MapAttr, PageError, TableLevel, TableOps},
-};
+use utils::{addr::*, MASK};
 
 /// FrameObject refers to a region of physical memory, aka leaf PTE in pagetable
 pub type FrameObject = [u8];
@@ -57,12 +47,11 @@ impl<'a> FrameCap<'a> {
     const VM_RIGHT_MASK: usize = MASK!(Self::VM_RIGHT_BITS);
     const VM_RIGHT_OFFSET: usize = 0;
 
-    pub fn mint(ptr: PhysAddr, size: TableLevel, attr: MapAttr, is_device: bool) -> RawCap {
-        let size_bits = size.level_size().ilog2() as usize;
+    pub fn mint(ptr: PhysAddr, size: usize, attr: MapAttr, is_device: bool) -> RawCap {
+        let size_bits = size.ilog2() as usize;
         debug_assert!(size_bits <= FrameCap::FRAME_SIZE_BITS);
 
-        // TODO: support huge page
-        assert!(size == TableLevel::Level0, "only support leaf page for now");
+        // NOTE: we are not checking frame size
 
         let arg0 = 0
             | ((attr.bits() & Self::VM_RIGHT_MASK) << Self::VM_RIGHT_OFFSET)
@@ -137,15 +126,14 @@ impl<'a> FrameCap<'a> {
         self.as_object_mut().fill(fill.unwrap_or(0));
     }
 
-    pub fn map_page(&self, root: &mut Table, vaddr: VirtAddr, attr: MapAttr) -> SysResult {
+    pub fn map_page<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr, attr: MapAttr) -> SysResult {
         let masked_attr = attr & self.attr();
 
-        // TODO: support huge page
-        root.map(vaddr, self.cte.cap.get().ptr, masked_attr, TableLevel::Level0)
-            .map_err(|e| match e {
-                PageError::AlreadyMapped(_) => SysError::AlreadyMapped,
-                PageError::MissingEntry(_) => SysError::MissingEntry,
-            })?;
+        root.map(vaddr, self.cte.cap.get().ptr, masked_attr).map_err(|e| match e {
+            PageError::AlreadyMapped => SysError::AlreadyMapped,
+            PageError::MissingEntry => SysError::MissingEntry,
+            PageError::NotAligned => SysError::InvalidArgument,
+        })?;
 
         self.set_mapped_asid(0);
         self.set_mapped_vaddr(vaddr);
@@ -153,15 +141,15 @@ impl<'a> FrameCap<'a> {
         Ok(())
     }
 
-    pub fn unmap(&self, root: &mut Table) -> SysResult {
+    pub fn unmap<T: TableLevel>(&self, root: &mut Table<T>) -> SysResult {
        let vaddr = self.mapped_vaddr();
         if vaddr.0 == 0 {
             return Err(SysError::NotMapped);
         }
 
         match root.lookup_mut(self.mapped_vaddr()) {
-            Some(entry) if entry.is_leaf() && entry.addr() == self.cte.cap.get().ptr => {
-                entry.set_addr(PhysAddr::default());
+            Some(entry) if entry.is_leaf() && entry.paddr() == self.cte.cap.get().ptr => {
+                entry.set_paddr(PhysAddr::default());
                 entry.set_attr(MapAttr::empty());
                 self.set_mapped_asid(0);

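Note on the error handling above: the new `Table::map` reports three `PageError` variants, and both `map_page` here and `map_table` in the page-table file below translate them to `SysError` with the same match. A minimal sketch of that shared translation, assuming only the variant names visible in this diff (the helper is illustrative, not part of the commit):

```rust
// Illustrative only; assumes `PageError` is reachable as `crate::vspace::PageError`
// (the diff imports it via `use crate::vspace::*`) and that `SysError` carries the
// variants used above.
use crate::vspace::PageError;
use uapi::error::SysError;

/// Translate paging failures into the syscall-level error codes used in this commit.
fn page_err_to_sys(e: PageError) -> SysError {
    match e {
        PageError::AlreadyMapped => SysError::AlreadyMapped,
        PageError::MissingEntry => SysError::MissingEntry,
        PageError::NotAligned => SysError::InvalidArgument,
    }
}
```

With such a helper, either call site could read `root.map(...).map_err(page_err_to_sys)?;`, keeping the two mappings from drifting apart.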

@@ -1,19 +1,13 @@
-use crate::arch::{layout::mmap_phys_to_virt, vspace::Table};
 use super::{cap::RawCap, Cap, KernelObject};
+use crate::{arch::layout::mmap_phys_to_virt, vspace::*};
 use uapi::{
     cap::ObjectType,
     error::{SysError, SysResult},
-    vspace::MapAttr,
 };
-use utils::MASK;
-use vspace::{
-    addr::{AddressOps, PhysAddr, VirtAddr},
-    paging::{EntryOps, PageError, TableLevel, TableOps},
-};
+use utils::{addr::*, MASK};
 
 /// TableObject is an object that represents a page table
-pub type TableObject = Table;
+pub struct TableObject([usize]);
 
 impl KernelObject for TableObject {
     const OBJ_TYPE: ObjectType = ObjectType::PageTable;
 }
@@ -87,33 +81,31 @@ impl<'a> TableCap<'a> {
         });
     }
 
-    pub fn as_object(&self) -> &TableObject {
+    pub fn as_object<T: TableLevel>(&self) -> Table<T> {
         unsafe {
             let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
             Table::new(virt)
         }
     }
 
-    pub fn as_object_mut(&mut self) -> &mut TableObject {
-        unsafe {
-            let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
-            Table::new(virt)
-        }
+    pub fn as_object_mut<T: TableLevel>(&mut self) -> Table<T> {
+        self.as_object()
     }
 
-    pub fn clear(&self) {
+    pub fn clear<T: TableLevel>(&self) {
         let array: &mut [u8] = unsafe {
             let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
-            core::slice::from_raw_parts_mut(virt.as_mut_ptr(), Table::TABLE_SIZE)
+            core::slice::from_raw_parts_mut(virt.as_mut_ptr(), T::ENTRIES * core::mem::size_of::<T::Entry>())
         };
         array.fill(0);
     }
 
-    pub fn map_table(&self, root: &mut Table, vaddr: VirtAddr, level: TableLevel) -> SysResult {
-        root.map(vaddr, self.cte.cap.get().ptr, MapAttr::PAGE_TABLE, level)
+    pub fn map_table<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr) -> SysResult {
+        root.map(vaddr, self.cte.cap.get().ptr, MapAttr::PAGE_TABLE)
             .map_err(|e| match e {
-                PageError::AlreadyMapped(_) => SysError::AlreadyMapped,
-                PageError::MissingEntry(_) => SysError::MissingEntry,
+                PageError::AlreadyMapped => SysError::AlreadyMapped,
+                PageError::MissingEntry => SysError::MissingEntry,
+                PageError::NotAligned => SysError::InvalidArgument,
             })?;
 
         self.set_mapped_asid(0);
@@ -122,15 +114,15 @@ impl<'a> TableCap<'a> {
         Ok(())
     }
 
-    pub fn unmap(&self, root: &mut Table) -> SysResult {
+    pub fn unmap<T: TableLevel>(&self, root: &mut Table<T>) -> SysResult {
         let vaddr = self.mapped_vaddr();
         if vaddr.0 == 0 {
             return Err(SysError::NotMapped);
         }
 
         match root.lookup_mut(self.mapped_vaddr()) {
-            Some(entry) if !entry.is_leaf() && entry.addr() == self.cte.cap.get().ptr => {
-                entry.set_addr(PhysAddr::default());
+            Some(entry) if !entry.is_leaf() && entry.paddr() == self.cte.cap.get().ptr => {
+                entry.set_paddr(PhysAddr::default());
                 entry.set_attr(MapAttr::empty());
                 self.set_mapped_asid(0);
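
For orientation, the call sites in this commit imply roughly the following shape for the new `crate::vspace` API: a `TableLevel` trait that knows its entry type and entry count, an entry trait with `is_leaf`/`paddr`/`set_paddr`/`set_attr`, and a lightweight `Table<T>` handle built from a kernel virtual address whose `map` can now fail with `NotAligned` in addition to `AlreadyMapped` and `MissingEntry`. The sketch below is reconstructed from those call sites only; the stub address/attr types, the `EntryOps` name (carried over from the old external `vspace` crate's imports), and all bodies are assumptions, not the repository's actual code.

```rust
// Reconstruction sketch only (not the repository's code): type and method names
// are inferred from the call sites in this commit; bodies are placeholders.
use core::marker::PhantomData;

#[derive(Clone, Copy, Default, PartialEq, Eq, Debug)]
pub struct PhysAddr(pub usize); // stand-in for the real utils::addr::PhysAddr
#[derive(Clone, Copy, Default, PartialEq, Eq, Debug)]
pub struct VirtAddr(pub usize); // stand-in for the real utils::addr::VirtAddr
#[derive(Clone, Copy, Default, PartialEq, Eq, Debug)]
pub struct MapAttr(pub usize); // stand-in for the real bitflags-style attribute type

/// Errors the new `Table::map` reports, per the match arms in this diff.
#[derive(Debug)]
pub enum PageError {
    AlreadyMapped,
    MissingEntry,
    NotAligned,
}

/// Entry operations used by `unmap`: is_leaf / paddr / set_paddr / set_attr.
pub trait EntryOps {
    fn is_leaf(&self) -> bool;
    fn paddr(&self) -> PhysAddr;
    fn set_paddr(&mut self, paddr: PhysAddr);
    fn set_attr(&mut self, attr: MapAttr);
}

/// A paging level: its entry type and per-table entry count, as consumed by
/// `clear` via `T::ENTRIES * size_of::<T::Entry>()`.
pub trait TableLevel {
    type Entry: EntryOps;
    const ENTRIES: usize;
}

/// A page table of level `T`, viewed through a kernel virtual address.
pub struct Table<T: TableLevel> {
    base: VirtAddr,
    _level: PhantomData<T>,
}

impl<T: TableLevel> Table<T> {
    /// In the diff `Table::new` is called inside `unsafe`; the caller must
    /// guarantee `base` points at a live level-`T` table.
    pub unsafe fn new(base: VirtAddr) -> Self {
        Self { base, _level: PhantomData }
    }

    /// Install a mapping for `vaddr` -> `paddr` with the given attributes.
    pub fn map(&mut self, vaddr: VirtAddr, paddr: PhysAddr, attr: MapAttr) -> Result<(), PageError> {
        let _ = (self.base, vaddr, paddr, attr);
        unimplemented!("walk the tree from self.base and write the target entry")
    }

    /// Find the entry currently covering `vaddr`, if any.
    pub fn lookup_mut(&mut self, vaddr: VirtAddr) -> Option<&mut T::Entry> {
        let _ = (self.base, vaddr);
        unimplemented!("walk the tree from self.base")
    }
}
```

Making `map_page`, `map_table`, and `unmap` generic over `T: TableLevel` is what lets the same capability methods target any level of the paging hierarchy, instead of the single concrete `Table` type the old `arch::vspace` module exposed.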