Compare commits

..

4 Commits

13 changed files with 117 additions and 93 deletions

Cargo.lock (generated) · 6 lines changed
View File

@ -37,9 +37,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.5.0"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
[[package]]
name = "cfg-if"
@ -101,7 +101,7 @@ name = "kernel"
version = "0.1.0"
dependencies = [
"allocator",
"bitflags 2.5.0",
"bitflags 2.6.0",
"cfg-if",
"cpio",
"elf",

View File

@ -26,7 +26,7 @@ tracer = { path = "../lib/tracer" }
uapi = { path = "../uapi" }
utils = { path = "../lib/utils", default-features = false }
bitflags = "2.5"
bitflags = "2.6"
cfg-if = "1.0"
elf = { version = "0.7", default-features = false }
fdt = { git = "https://github.com/panpaul/fdt.git" } # fork from upstream

View File

@ -16,7 +16,7 @@ unsafe extern "C" fn _start(hart_id: usize, fdt_addr: usize) -> ! {
// we should be launched by OpenSBI and running is S-mode
// 128 KiB stack for debugging only
-const STACK_SIZE: usize = 4096 * 32;
+const STACK_SIZE: usize = 4096 * 32; // at least 21, TODO: figure out why we need sooo much stack
#[link_section = ".bss.boot_stack"]
static mut STACK: [u8; STACK_SIZE] = [0u8; STACK_SIZE];
@ -26,8 +26,8 @@ unsafe extern "C" fn _start(hart_id: usize, fdt_addr: usize) -> ! {
.option push
.option norelax
-1: auipc gp, %pcrel_hi(__global_pointer$)
-addi gp, gp, %pcrel_lo(1b)
+2: auipc gp, %pcrel_hi(__global_pointer$)
+addi gp, gp, %pcrel_lo(2b)
.option pop
lla t0, KERNEL_OFFSET

View File

@ -38,8 +38,8 @@ extern "C" fn idle_thread() {
unsafe {
core::arch::asm!(
"
-1: wfi
-j 1b
+2: wfi
+j 2b
",
options(noreturn)
)

View File

@ -6,7 +6,7 @@ use fdt::{node::FdtNode, Fdt};
use log::trace;
use spin::Mutex;
-const IRQ_INVALID: usize = u32::MAX as usize;
+const IRQ_OCCUPIED: usize = u32::MAX as usize; // OpenSBI will rewrite IRQ_M_EXT to this value
const IRQ_S_EXT: usize = 9;
const IRQ_M_EXT: usize = 11;
@ -62,7 +62,7 @@ impl Driver for IrqPlic {
let phandle = ctx.phandle; // cpu intc
let irq = ctx.interrupts().next().unwrap(); // IRQ_M_EXT/IRQ_S_EXT
-if irq == IRQ_INVALID {
+if irq == IRQ_OCCUPIED {
trace!("[IrqPlic] context {} taken by SBI", i);
continue;
}

View File

@ -1,5 +1,4 @@
use crate::objects::*;
-use core::cell::Cell;
use core::fmt::Debug;
use uapi::cap::ObjectType;
use utils::addr::PhysAddr;
@ -31,10 +30,20 @@ impl RawCap {
cap_type,
}
}
+/// modify the capability
+/// # Safety
+/// This function breaks the safety rules of Rust, race condition may occur
+pub unsafe fn update<F>(&self, f: F) -> &Self
+where F: FnOnce(&mut Self) {
+#[allow(invalid_reference_casting)]
+f(unsafe { &mut *(self as *const _ as *mut Self) });
+self
+}
}
pub struct CapEntry {
-pub cap: Cell<RawCap>,
+pub cap: RawCap,
pub link: Link<Self>,
}
@ -43,20 +52,20 @@ LinkHelperImpl!(CapEntry: link);
impl CapEntry {
pub fn new(cap: RawCap) -> Self {
Self {
-cap: Cell::new(cap),
+cap,
link: Link::default(),
}
}
pub fn init(&mut self) {
-self.cap = Cell::new(NullCap::mint());
+self.cap = NullCap::mint();
self.link = Link::default();
}
}
impl Clone for CapEntry {
fn clone(&self) -> Self {
-let mut cte = Self::new(self.cap.get());
+let mut cte = Self::new(self.cap);
cte.link = self.link.clone();
cte
}
@ -70,7 +79,7 @@ impl From<RawCap> for CapEntry {
impl Debug for CapEntry {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
-let cap_type = self.cap.get().cap_type;
+let cap_type = self.cap.cap_type;
match cap_type {
ObjectType::CNode => write!(f, "{:?}", CNodeCap::try_from(self)),
ObjectType::Frame => write!(f, "{:?}", FrameCap::try_from(self)),
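The cap.rs change above drops the Cell<RawCap> wrapper from CapEntry and instead adds an unsafe RawCap::update that hands callers a mutable view through a shared reference. Below is a hedged, self-contained sketch of that update-through-&self idea. The sketch routes the mutation through core::cell::UnsafeCell, the language's sanctioned hook for this, whereas the code above casts &self to &mut Self directly (hence its #[allow(invalid_reference_casting)] and the Safety note about races); Payload and Entry are illustrative stand-ins, not project types.

```rust
use core::cell::UnsafeCell;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Payload {
    args: [usize; 2],
}

struct Entry {
    // UnsafeCell tells the compiler this field may be mutated behind a shared reference.
    cap: UnsafeCell<Payload>,
}

impl Entry {
    fn new(cap: Payload) -> Self {
        Self { cap: UnsafeCell::new(cap) }
    }

    /// # Safety
    /// The caller must guarantee exclusive access to the payload while `f` runs
    /// (no other reads or writes), mirroring the Safety contract stated above.
    unsafe fn update<F: FnOnce(&mut Payload)>(&self, f: F) -> &Self {
        f(&mut *self.cap.get());
        self
    }

    fn read(&self) -> Payload {
        unsafe { *self.cap.get() }
    }
}

fn main() {
    let entry = Entry::new(Payload { args: [0, 0] });
    // Same call shape as the new test code: mutate args through a shared reference.
    unsafe {
        entry.update(|p| {
            p.args[0] = 0xdeadbeef;
            p.args[1] = 0xaa55aa55;
        });
    }
    assert_eq!(entry.read(), Payload { args: [0xdeadbeef, 0xaa55aa55] });
}
```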

View File

@ -56,15 +56,15 @@ impl<'a> CNodeCap<'a> {
}
fn radix_bits(&self) -> usize {
-(self.cte.cap.get().args[0] >> Self::RADIX_OFFSET) & Self::RADIX_MASK
+(self.cte.cap.args[0] >> Self::RADIX_OFFSET) & Self::RADIX_MASK
}
fn guard_bits(&self) -> usize {
-(self.cte.cap.get().args[0] >> Self::GUARD_SIZE_OFFSET) & Self::GUARD_SIZE_MASK
+(self.cte.cap.args[0] >> Self::GUARD_SIZE_OFFSET) & Self::GUARD_SIZE_MASK
}
fn guard(&self) -> usize {
-self.cte.cap.get().args[1]
+self.cte.cap.args[1]
}
/// CNodeObject length
@ -77,14 +77,14 @@ impl<'a> CNodeCap<'a> {
pub fn as_object(&self) -> &CNodeObject {
unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
core::slice::from_raw_parts(virt.as_const_ptr(), self.length())
}
}
pub fn as_object_mut(&mut self) -> &mut CNodeObject {
unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
core::slice::from_raw_parts_mut(virt.as_mut_ptr(), self.length())
}
}
@ -142,7 +142,7 @@ impl Debug for CNodeCap<'_> {
.field("guard_bits", &self.guard_bits())
.field("guard", &self.guard())
.field("length", &self.length())
.field("ptr", &self.cte.cap.get().ptr)
.field("ptr", &self.cte.cap.ptr)
.finish()
}
}
@ -151,7 +151,6 @@ impl Debug for CNodeCap<'_> {
mod tests {
use super::*;
use crate::arch::vspace::RAM_ALLOCATOR;
use crate::objects::*;
use core::alloc::Layout;
use log::info;
@ -176,17 +175,18 @@ mod tests {
cnode1.as_object_mut()[6] = cte2.clone();
cnode2.as_object_mut()[1] = cte3.clone();
cnode3.as_object_mut()[2] = NullCap::mint().into();
-cnode3.as_object_mut()[2].cap.update(|mut cap| {
-cap.args[0] = 0xdeadbeef;
-cap.args[1] = 0xaa55aa55;
-cap
-});
+unsafe {
+cnode3.as_object_mut()[2].cap.update(|cap| {
+cap.args[0] = 0xdeadbeef;
+cap.args[1] = 0xaa55aa55;
+});
+}
{
// cte2.clone() cte3.clone() should not modify cte itself
info!("Testing whether RawCap and Cap held the same cap");
-assert!(cnode2.cte.cap.get() == cte2.cap.get());
-assert!(cnode3.cte.cap.get() == cte3.cap.get());
+assert!(cnode2.cte.cap == cte2.cap);
+assert!(cnode3.cte.cap == cte3.cap);
}
let root = cnode1;
@ -213,7 +213,7 @@ mod tests {
// * cnode2 length = 8
info!("Testing resolve to cte2");
let target = root.resolve_address_bits(0x96, 8).unwrap();
-assert!(target.cap.get() == cte2.cap.get());
+assert!(target.cap == cte2.cap);
}
{
@ -222,7 +222,7 @@ mod tests {
// * cnode3 length = 15
info!("Testing resolve to cte3");
let target = root.resolve_address_bits(0x4b31, 15).unwrap();
-assert!(target.cap.get() == cte3.cap.get());
+assert!(target.cap == cte3.cap);
}
{
@ -231,8 +231,8 @@ mod tests {
// * target length = 21
info!("Testing resolve to target");
let target = root.resolve_address_bits(0x12cc6a, 21).unwrap();
-assert!(target.cap.get().args[0] == 0xdeadbeef);
-assert!(target.cap.get().args[1] == 0xaa55aa55);
+assert!(target.cap.args[0] == 0xdeadbeef);
+assert!(target.cap.args[1] == 0xaa55aa55);
}
{

View File

@ -64,54 +64,56 @@ impl<'a> FrameCap<'a> {
}
pub fn attr(&self) -> MapAttr {
-let bits = (self.cte.cap.get().args[0] >> Self::VM_RIGHT_OFFSET) & Self::VM_RIGHT_MASK;
+let bits = (self.cte.cap.args[0] >> Self::VM_RIGHT_OFFSET) & Self::VM_RIGHT_MASK;
MapAttr::from_bits_truncate(bits)
}
pub fn is_device(&self) -> bool {
-let bits = (self.cte.cap.get().args[0] >> Self::IS_DEVICE_OFFSET) & Self::IS_DEVICE_MASK;
+let bits = (self.cte.cap.args[0] >> Self::IS_DEVICE_OFFSET) & Self::IS_DEVICE_MASK;
bits != 0
}
pub fn size(&self) -> usize {
-let bits = (self.cte.cap.get().args[0] >> Self::FRAME_SIZE_OFFSET) & Self::FRAME_SIZE_MASK;
+let bits = (self.cte.cap.args[0] >> Self::FRAME_SIZE_OFFSET) & Self::FRAME_SIZE_MASK;
1 << bits
}
pub fn mapped_asid(&self) -> usize {
-(self.cte.cap.get().args[0] >> Self::ASID_OFFSET) & Self::ASID_MASK
+(self.cte.cap.args[0] >> Self::ASID_OFFSET) & Self::ASID_MASK
}
pub fn set_mapped_asid(&self, asid: usize) {
-self.cte.cap.update(|mut cap| {
-let asid = (asid & Self::ASID_MASK) << Self::ASID_OFFSET;
-let arg0 = cap.args[0] & !(Self::ASID_MASK << Self::ASID_OFFSET);
-cap.args[0] = arg0 | asid;
-cap
-});
+unsafe {
+self.cte.cap.update(|cap| {
+let asid = (asid & Self::ASID_MASK) << Self::ASID_OFFSET;
+let arg0 = cap.args[0] & !(Self::ASID_MASK << Self::ASID_OFFSET);
+cap.args[0] = arg0 | asid;
+})
+};
}
pub fn mapped_vaddr(&self) -> VirtAddr {
-VirtAddr(self.cte.cap.get().args[1])
+VirtAddr(self.cte.cap.args[1])
}
pub fn set_mapped_vaddr(&self, vaddr: VirtAddr) {
-self.cte.cap.update(|mut cap| {
-cap.args[1] = vaddr.0;
-cap
-});
+unsafe {
+self.cte.cap.update(|cap| {
+cap.args[1] = vaddr.0;
+})
+};
}
pub fn as_object(&self) -> &FrameObject {
unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
core::slice::from_raw_parts(virt.as_const_ptr(), self.size())
}
}
pub fn as_object_mut(&mut self) -> &mut FrameObject {
unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
core::slice::from_raw_parts_mut(virt.as_mut_ptr(), self.size())
}
}
@ -126,7 +128,7 @@ impl<'a> FrameCap<'a> {
pub fn map<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr, attr: MapAttr) -> SysResult {
let masked_attr = attr & self.attr();
-root.map(vaddr, self.cte.cap.get().ptr, masked_attr)?;
+root.map(vaddr, self.cte.cap.ptr, masked_attr)?;
self.set_mapped_asid(0);
self.set_mapped_vaddr(vaddr);
@ -141,7 +143,7 @@ impl<'a> FrameCap<'a> {
}
match root.lookup_mut(vaddr) {
-Some(entry) if entry.is_leaf() && entry.paddr() == self.cte.cap.get().ptr => {
+Some(entry) if entry.is_leaf() && entry.paddr() == self.cte.cap.ptr => {
entry.set_paddr(PhysAddr::default());
entry.set_attr(MapAttr::empty());
@ -159,7 +161,7 @@ impl<'a> FrameCap<'a> {
impl Debug for FrameCap<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("FrameCap")
.field("ptr", &self.cte.cap.get().ptr)
.field("ptr", &self.cte.cap.ptr)
.field("size", &self.size())
.field("attr", &self.attr())
.field("is_device", &self.is_device())

View File

@ -48,7 +48,7 @@ impl<'a, T: KernelObject + ?Sized> TryFrom<&'a CapEntry> for Cap<'a, T> {
type Error = SysError;
fn try_from(new: &'a CapEntry) -> SysResult<Self> {
-if new.cap.get().cap_type != T::OBJ_TYPE {
+if new.cap.cap_type != T::OBJ_TYPE {
Err(SysError::CapTypeMismatch)
} else {
Ok(Self {

View File

@ -43,47 +43,50 @@ impl<'a> TableCap<'a> {
}
pub fn is_mapped(&self) -> bool {
-let bits = (self.cte.cap.get().args[0] >> Self::IS_MAPPED_OFFSET) & Self::IS_MAPPED_MASK;
+let bits = (self.cte.cap.args[0] >> Self::IS_MAPPED_OFFSET) & Self::IS_MAPPED_MASK;
bits != 0
}
pub fn set_mapped(&self, mapped: bool) {
-self.cte.cap.update(|mut cap| {
-let is_mapped = if mapped { 1 } else { 0 };
-let bits = (is_mapped & Self::IS_MAPPED_MASK) << Self::IS_MAPPED_OFFSET;
-let arg0 = cap.args[0] & !(Self::IS_MAPPED_MASK << Self::IS_MAPPED_OFFSET);
-cap.args[0] = arg0 | bits;
-cap
-});
+unsafe {
+self.cte.cap.update(|cap| {
+let is_mapped = if mapped { 1 } else { 0 };
+let bits = (is_mapped & Self::IS_MAPPED_MASK) << Self::IS_MAPPED_OFFSET;
+let arg0 = cap.args[0] & !(Self::IS_MAPPED_MASK << Self::IS_MAPPED_OFFSET);
+cap.args[0] = arg0 | bits;
+})
+};
}
pub fn asid(&self) -> usize {
-(self.cte.cap.get().args[0] >> Self::ASID_OFFSET) & Self::ASID_MASK
+(self.cte.cap.args[0] >> Self::ASID_OFFSET) & Self::ASID_MASK
}
pub fn set_mapped_asid(&self, asid: usize) {
-self.cte.cap.update(|mut cap| {
-let bits = (asid & Self::ASID_MASK) << Self::ASID_OFFSET;
-let arg0 = cap.args[0] & !(Self::ASID_MASK << Self::ASID_OFFSET);
-cap.args[0] = arg0 | bits;
-cap
-});
+unsafe {
+self.cte.cap.update(|cap| {
+let bits = (asid & Self::ASID_MASK) << Self::ASID_OFFSET;
+let arg0 = cap.args[0] & !(Self::ASID_MASK << Self::ASID_OFFSET);
+cap.args[0] = arg0 | bits;
+})
+};
}
pub fn mapped_vaddr(&self) -> VirtAddr {
-VirtAddr(self.cte.cap.get().args[1])
+VirtAddr(self.cte.cap.args[1])
}
pub fn set_mapped_vaddr(&self, vaddr: VirtAddr) {
-self.cte.cap.update(|mut cap| {
-cap.args[1] = vaddr.0;
-cap
-});
+unsafe {
+self.cte.cap.update(|cap| {
+cap.args[1] = vaddr.0;
+})
+};
}
pub fn as_object<T: TableLevel>(&self) -> Table<T> {
unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
Table::new(virt)
}
}
@ -94,14 +97,14 @@ impl<'a> TableCap<'a> {
pub fn clear<T: TableLevel>(&self) {
let array: &mut [u8] = unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
core::slice::from_raw_parts_mut(virt.as_mut_ptr(), T::ENTRIES * core::mem::size_of::<T::Entry>())
};
array.fill(0);
}
pub fn map<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr) -> SysResult {
-root.map(vaddr, self.cte.cap.get().ptr, MapAttr::PAGE_TABLE)?;
+root.map(vaddr, self.cte.cap.ptr, MapAttr::PAGE_TABLE)?;
self.set_mapped_asid(0);
self.set_mapped_vaddr(vaddr);
@ -116,7 +119,7 @@ impl<'a> TableCap<'a> {
}
match root.lookup_mut(vaddr) {
-Some(entry) if !entry.is_leaf() && entry.paddr() == self.cte.cap.get().ptr => {
+Some(entry) if !entry.is_leaf() && entry.paddr() == self.cte.cap.ptr => {
entry.set_paddr(PhysAddr::default());
entry.set_attr(MapAttr::empty());
@ -133,6 +136,6 @@ impl<'a> TableCap<'a> {
impl Debug for TableCap<'_> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("TableCap").field("ptr", &self.cte.cap.get().ptr).finish()
f.debug_struct("TableCap").field("ptr", &self.cte.cap.ptr).finish()
}
}

View File

@ -149,14 +149,14 @@ impl<'a> TcbCap<'a> {
pub fn as_object(&self) -> &TcbObject {
unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
&*(virt.as_const_ptr())
}
}
pub fn as_object_mut(&mut self) -> &mut TcbObject {
unsafe {
-let virt = mmap_phys_to_virt(self.cte.cap.get().ptr);
+let virt = mmap_phys_to_virt(self.cte.cap.ptr);
&mut *(virt.as_mut_ptr())
}
}

View File

@ -42,22 +42,23 @@ impl UntypedCap<'_> {
}
fn free_offset(&self) -> usize {
-self.cte.cap.get().args[0]
+self.cte.cap.args[0]
}
fn set_free_offset(&mut self, free_offset: usize) {
-self.cte.cap.update(|mut c| {
-c.args[0] = free_offset;
-c
-});
+unsafe {
+self.cte.cap.update(|c| {
+c.args[0] = free_offset;
+})
+};
}
pub fn is_device(&self) -> bool {
-(self.cte.cap.get().args[1] >> 6) & 1 == 1
+(self.cte.cap.args[1] >> 6) & 1 == 1
}
fn block_bits(&self) -> usize {
-self.cte.cap.get().args[1] & MASK!(6)
+self.cte.cap.args[1] & MASK!(6)
}
fn block_size(&self) -> usize {
@ -110,7 +111,7 @@ impl UntypedCap<'_> {
// Create new capabilities in slot
for (i, slot) in slots.iter_mut().enumerate() {
-let addr = self.cte.cap.get().ptr + start_offset + i * obj_size;
+let addr = self.cte.cap.ptr + start_offset + i * obj_size;
let new_cap = match obj_type {
ObjectType::Untyped => UntypedCap::mint(0, obj_type.bits(user_obj_bits), self.is_device(), addr),
ObjectType::CNode => CNodeCap::mint(user_obj_bits, 0, 0, addr),
@ -121,7 +122,7 @@ impl UntypedCap<'_> {
_ => return Err(SysError::InvalidArgument),
};
-slot.cap.set(new_cap);
+slot.cap = new_cap;
self.append(slot);
}
@ -138,7 +139,7 @@ impl Debug for UntypedCap<'_> {
.field("free_offset", &self.free_offset())
.field("block_bits", &self.block_bits())
.field("is_device", &self.is_device())
.field("ptr", &self.cte.cap.get().ptr)
.field("ptr", &self.cte.cap.ptr)
.finish()
}
}
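In the retype loop above, each new child object's physical address is computed as self.cte.cap.ptr + start_offset + i * obj_size, i.e. children are carved back to back out of the parent untyped region starting at its aligned free offset. A small worked sketch of that arithmetic, with illustrative values only:

```rust
fn child_addr(parent_base: usize, start_offset: usize, obj_size: usize, i: usize) -> usize {
    // the i-th child sits i * obj_size bytes past the first free, aligned offset
    parent_base + start_offset + i * obj_size
}

fn main() {
    let parent_base = 0x8020_0000; // illustrative physical base of the untyped region
    let obj_size = 4096;           // e.g. one 4 KiB object per child
    let start_offset = 8192;       // free offset, already rounded up to obj_size alignment
    let addrs: Vec<usize> = (0..4)
        .map(|i| child_addr(parent_base, start_offset, obj_size, i))
        .collect();
    assert_eq!(addrs, vec![0x8020_2000, 0x8020_3000, 0x8020_4000, 0x8020_5000]);
}
```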

View File

@ -23,3 +23,12 @@ pub struct IrqManager {
state: [IrqState; IRQ_NUM],
}
+impl IrqManager {
+pub fn new() -> Self {
+Self {
+handler: core::array::from_fn::<_, IRQ_NUM, _>(|_| CapEntry::new(NullCap::mint())),
+state: [IrqState::Inactive; IRQ_NUM],
+}
+}
+}
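IrqManager::new() above fills a fixed-size array of CapEntry values with core::array::from_fn. A brief, self-contained sketch of why that helper fits here: a plain [expr; N] repeat expression needs the element to be Copy (or a constant), while from_fn builds each element from a closure, which works for non-Copy types such as CapEntry. Slot and SLOTS below are illustrative stand-ins, not project types.

```rust
#[derive(Debug)]
struct Slot {
    id: usize,
    active: bool,
}

const SLOTS: usize = 8;

fn main() {
    // The closure runs once per index and returns that element;
    // [Slot { .. }; SLOTS] would not compile because Slot is not Copy.
    let table: [Slot; SLOTS] = core::array::from_fn(|i| Slot { id: i, active: false });
    assert_eq!(table.len(), SLOTS);
    assert!(table.iter().all(|s| !s.active));
    println!("first entry: {:?}", table[0]);
}
```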