Compare commits

..

3 Commits

Author SHA1 Message Date
1d026d8053 feat: map kernel
1. initial riscv32 support
2. fix bug in table.rs: cur level order is reversed
3. introduce global kernel_{phys, virt}_to_{virt, phys} for address translating
4. remove unused riscv/mm
5. initial early_memory management through RamBlock
6. initial kernel space mapping support
2024-04-08 16:23:10 +08:00
13e331f4d2 chore: introduce PAGE_LAYOUT 2024-04-07 20:10:08 +08:00
70ca917778 chore: rename symbol name from linker script 2024-04-07 19:38:26 +08:00
22 changed files with 504 additions and 308 deletions

View File

@ -3,3 +3,6 @@ target = "riscv64imac-unknown-none-elf"
[target.'cfg(all(target_arch = "riscv64", target_os = "none"))']
runner = "qemu-system-riscv64 -nographic -machine virt -serial mon:stdio -smp 1 -kernel "
[target.'cfg(all(target_arch = "riscv32", target_os = "none"))']
runner = "qemu-system-riscv32 -nographic -machine virt -serial mon:stdio -smp 1 -kernel "

View File

@ -8,6 +8,8 @@ edition = "2021"
[features]
default = ["riscv.board.virt", "log_color"]
legacy = []
riscv = []
"riscv.pagetable.sv32" = []
@ -16,7 +18,7 @@ riscv = []
"riscv.pagetable.sv57" = []
"riscv.riscv64" = ["riscv", "riscv.pagetable.sv39"]
"riscv.riscv32" = ["riscv", "riscv.pagetable.sv32"]
"riscv.riscv32" = ["riscv", "riscv.pagetable.sv32", "legacy"]
"riscv.board.default" = ["riscv.riscv64"]
"riscv.board.virt" = ["riscv.riscv64"]

View File

@ -13,7 +13,7 @@ fn main() {
},
TargetConfig {
target: "riscv32",
lds: "src/arch/riscv/linker.ld",
lds: "src/arch/riscv/linker32.ld",
},
];

View File

@ -1,24 +1,43 @@
use super::board::console::init_early_console;
use super::layout::{__boot_stack_end, __bss_end, __tbss_end, __tbss_start};
use super::vspace::utils::setup_kernel_paging;
use crate::arch::layout::zero_bss;
use crate::arch::vspace::utils::setup_memory;
use crate::vspace::allocator::RamBlock;
use fdt::Fdt;
#[naked]
#[no_mangle]
#[allow(named_asm_labels)]
#[link_section = ".text.entry"]
unsafe extern "C" fn _start(hart_id: usize, device_tree_addr: usize) -> ! {
// NOTE: no stack here
// we should be launched by OpenSBI and running is S-mode
const STACK_SIZE: usize = 4096 * 16;
// 128 KiB stack for debugging only
const STACK_SIZE: usize = 4096 * 32;
#[link_section = ".bss.boot_stack"]
static mut STACK: [u8; STACK_SIZE] = [0u8; STACK_SIZE];
core::arch::asm!(
"csrw sie, 0",
"csrw sip, 0",
"la gp, __global_pointer$",
"la tp, __tss_start",
"la sp, {stack} + {stack_size}",
"j {main}",
core::arch::asm!("
csrw sie, 0
csrw sip, 0
.option push
.option norelax
1: auipc gp, %pcrel_hi(__global_pointer$)
addi gp, gp, %pcrel_lo(1b)
.option pop
la tp, TSS_START
la sp, {stack} + {stack_size}
j {main}
.section .data
.global KERNEL_OFFSET
KERNEL_OFFSET: .quad __kernel_offset
.global MMAP_OFFSET
MMAP_OFFSET: .quad MMAP_BASE_ADDRESS
",
stack_size = const STACK_SIZE,
stack = sym STACK,
main = sym pre_main,
@ -26,27 +45,24 @@ unsafe extern "C" fn _start(hart_id: usize, device_tree_addr: usize) -> ! {
)
}
extern "C" fn pre_main(hart_id: usize, device_tree_addr: usize) -> ! {
extern "C" fn pre_main(hart_id: usize, fdt_addr: usize) -> ! {
zero_bss();
init_early_console();
// Don't know why, but the **fucking** rust compiler will hard-code jump table to absolute
// addresses, even if I forced compiler to use pic mode, and global spin-lock depends on it,
// so we must remap the kernel to the higher half before anything.
// Console is not available yet !!
// TODO: initialize page table
crate::entry::rust_main(hart_id, device_tree_addr);
}
#[inline(always)]
fn zero_bss() {
fn clear_range<T: Clone>(bgn: *mut T, end: *mut T) {
unsafe {
core::slice::from_raw_parts_mut(bgn, end.offset_from(bgn) as usize)
.fill(core::mem::zeroed())
}
}
let fdt = unsafe { Fdt::from_ptr(fdt_addr as *const u8).unwrap() };
let mut allocator = RamBlock::<8>::new();
unsafe {
clear_range(__boot_stack_end.as_mut_ptr(), __bss_end.as_mut_ptr());
clear_range(__tbss_start.as_mut_ptr(), __tbss_end.as_mut_ptr());
setup_memory(&fdt, fdt_addr, &mut allocator);
setup_kernel_paging(&mut allocator);
}
crate::logging::init();
init_early_console();
crate::entry::rust_main(hart_id, fdt, allocator);
}

View File

@ -1,31 +1,67 @@
use crate::utils::extern_addr::ExternSymbol;
use crate::utils::size::KIB;
use crate::vspace::addr::{AddressOps, PhysAddr, VirtAddr};
use core::alloc::Layout;
extern "C" {
static __kernel_start: ExternSymbol;
static __kernel_end: ExternSymbol;
pub static KERNEL_START: ExternSymbol;
pub static KERNEL_END: ExternSymbol;
pub static KERNEL_OFFSET: usize;
pub static MMAP_OFFSET: usize;
static __text_start: ExternSymbol;
static __text_end: ExternSymbol;
pub static TEXT_START: ExternSymbol;
pub static TEXT_END: ExternSymbol;
static __rodata_start: ExternSymbol;
static __rodata_end: ExternSymbol;
pub static RODATA_START: ExternSymbol;
pub static RODATA_END: ExternSymbol;
static __data_start: ExternSymbol;
static __data_end: ExternSymbol;
pub static DATA_START: ExternSymbol;
pub static DATA_END: ExternSymbol;
static __bss_start: ExternSymbol;
pub static __boot_stack_end: ExternSymbol;
pub static __bss_end: ExternSymbol;
pub static BSS_START: ExternSymbol;
pub static BOOT_STACK_END: ExternSymbol;
pub static BSS_END: ExternSymbol;
static __tss_start: ExternSymbol;
static __tss_end: ExternSymbol;
pub static TSS_START: ExternSymbol;
pub static TSS_END: ExternSymbol;
static __tdata_start: ExternSymbol;
static __tdata_end: ExternSymbol;
pub static TDATA_START: ExternSymbol;
pub static TDATA_END: ExternSymbol;
pub static __tbss_start: ExternSymbol;
pub static __tbss_end: ExternSymbol;
pub static TBSS_START: ExternSymbol;
pub static TBSS_END: ExternSymbol;
}
pub const PAGE_SIZE: usize = 4 * KIB;
pub const PAGE_LAYOUT: Layout = unsafe { Layout::from_size_align_unchecked(PAGE_SIZE, PAGE_SIZE) };
/// Zero the kernel's BSS-like regions during early boot.
///
/// Clears `.bss`/`.sbss` (starting *after* the boot stack, which is already
/// live at this point) and the thread-local `.tbss` template, using the
/// boundary symbols exported by the linker script. Must run before any code
/// reads zero-initialized statics.
#[inline(always)]
pub fn zero_bss() {
// Zero-fill the half-open range [bgn, end); callers must pass bgn <= end.
fn clear_range<T: Clone>(bgn: *mut T, end: *mut T) {
unsafe {
core::slice::from_raw_parts_mut(bgn, end.offset_from(bgn) as usize)
.fill(core::mem::zeroed())
}
}
// SAFETY: the symbol pairs come from the linker script and bound writable
// kernel memory. BOOT_STACK_END is used as the start of the first range so
// the in-use boot stack (placed at the front of .bss) is left untouched.
unsafe {
clear_range(BOOT_STACK_END.as_mut_ptr(), BSS_END.as_mut_ptr());
clear_range(TBSS_START.as_mut_ptr(), TBSS_END.as_mut_ptr());
}
}
/// Translate a physical address to the kernel image's virtual address by
/// adding `KERNEL_OFFSET` (the link/load delta `__kernel_offset` patched in
/// by `_start`'s data words).
///
/// # Safety
/// The arithmetic is unchecked; only meaningful for addresses that actually
/// lie inside the kernel image's mapped range — TODO confirm callers uphold
/// this.
pub unsafe fn kernel_phys_to_virt(phys: PhysAddr) -> VirtAddr {
VirtAddr(phys.as_usize() + KERNEL_OFFSET)
}
/// Inverse of [`kernel_phys_to_virt`]: recover the physical address of a
/// kernel-image virtual address.
///
/// # Safety
/// Same caveat: `virt` must come from the kernel image's mapped range.
pub unsafe fn kernel_virt_to_phys(virt: VirtAddr) -> PhysAddr {
PhysAddr(virt.as_usize() - KERNEL_OFFSET)
}
/// Translate a physical address into the linear physical-memory map window
/// (based at `MMAP_OFFSET`, i.e. the linker's `MMAP_BASE_ADDRESS`).
///
/// # Safety
/// Unchecked arithmetic; `phys` must be covered by the mmap window.
pub unsafe fn mmap_phys_to_virt(phys: PhysAddr) -> VirtAddr {
VirtAddr(phys.as_usize() + MMAP_OFFSET)
}
/// Inverse of [`mmap_phys_to_virt`].
///
/// # Safety
/// Unchecked arithmetic; `virt` must lie inside the mmap window.
pub unsafe fn mmap_virt_to_phys(virt: VirtAddr) -> PhysAddr {
PhysAddr(virt.as_usize() - MMAP_OFFSET)
}

View File

@ -1,71 +0,0 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
BASE_ADDRESS = 0x80200000;
PAGE_SIZE = 0x1000;
SECTIONS {
. = BASE_ADDRESS;
__kernel_start = .;
.text : {
__text_start = .;
*(.text.entry)
*(.text .text.*)
__text_end = .;
}
.rodata : {
. = ALIGN(8);
__rodata_start = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
__rodata_end = .;
}
.data : {
. = ALIGN(8);
__data_start = .;
*(.data .data.*)
PROVIDE( __global_pointer$ = . + 0x800 );
*(.sdata .sdata.*)
__data_end = .;
}
.bss : {
. = ALIGN(8);
__bss_start = .;
*(.bss.boot_stack)
__boot_stack_end = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
__bss_end = .;
}
.tss : {
. = ALIGN(8);
__tss_start = .;
. = ALIGN(8);
__tdata_start = .;
*(.tdata .tdata.*)
__tdata_end = .;
. = ALIGN(8);
__tbss_start = .;
*(.tbss .tbss.*)
__tbss_end = .;
__tss_end = .;
}
. = ALIGN(PAGE_SIZE);
__kernel_end = .;
/DISCARD/ : {
*(.eh_frame_hdr)
*(.eh_frame)
}
}

View File

@ -0,0 +1,74 @@
/* riscv32 (Sv32) linker script: the kernel is linked at its physical load
 * address, so VMA == LMA and __kernel_offset is zero. */
OUTPUT_ARCH(riscv)
ENTRY(_start)
/* Base of the physical-memory map window (see mmap_phys_to_virt); identity
 * on 32-bit. Exported to Rust via the MMAP_OFFSET data word in _start. */
MMAP_BASE_ADDRESS = 0x0;
/* Link (virtual) address of the kernel image. */
BASE_ADDRESS = 0x80200000;
/* Physical load address the firmware jumps to. */
PHY_BASE_ADDRESS = 0x80200000;
PAGE_SIZE = 0x1000;
SECTIONS {
. = BASE_ADDRESS;
KERNEL_START = .;
/* VA - PA delta; zero here, non-zero in the 64-bit higher-half script. */
__kernel_offset = . - PHY_BASE_ADDRESS;
/* AT(...) pins each section's load address (LMA) to its physical location. */
.text : AT(ADDR(.text) - __kernel_offset) {
TEXT_START = .;
*(.text.entry) /* boot entry must be placed first */
*(.text .text.*)
TEXT_END = .;
}
.rodata : AT(ADDR(.rodata) - __kernel_offset) {
/* page-align section starts so each can get distinct page permissions */
. = ALIGN(PAGE_SIZE);
RODATA_START = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
RODATA_END = .;
}
.data : AT(ADDR(.data) - __kernel_offset) {
. = ALIGN(PAGE_SIZE);
DATA_START = .;
*(.data .data.*)
/* gp-relative addressing: gp sits 0x800 past the small-data area start */
PROVIDE( __global_pointer$ = . + 0x800 );
*(.sdata .sdata.*)
DATA_END = .;
}
.bss : AT(ADDR(.bss) - __kernel_offset) {
. = ALIGN(PAGE_SIZE);
BSS_START = .;
/* boot stack first, so zero_bss can skip it via BOOT_STACK_END */
*(.bss.boot_stack)
BOOT_STACK_END = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
BSS_END = .;
}
/* thread-local storage template: .tdata initializers then .tbss */
.tss : AT(ADDR(.tss) - __kernel_offset) {
. = ALIGN(PAGE_SIZE);
TSS_START = .;
. = ALIGN(8);
TDATA_START = .;
*(.tdata .tdata.*)
TDATA_END = .;
. = ALIGN(8);
TBSS_START = .;
*(.tbss .tbss.*)
TBSS_END = .;
TSS_END = .;
}
. = ALIGN(PAGE_SIZE);
KERNEL_END = .;
/* no unwinding in the kernel: drop DWARF EH frames */
/DISCARD/ : {
*(.eh_frame_hdr)
*(.eh_frame)
}
}

View File

@ -0,0 +1,79 @@
/* riscv64 higher-half linker script (Sv39 and larger). */
OUTPUT_ARCH(riscv)
ENTRY(_start)
/* We use high memory (0xFFFF....) for kernel space.
 * For Sv39 and larger layouts, kernel-half memory starts at
 * 0xFFFFFFC000000000: {1'b1, {38{1'b0}}} sign-extended.
 * The kernel image is placed at 0xFFFFFFD000000000 (VA) / 0x80200000 (PA);
 * the region between 0x...C... and 0x...D... is reserved for the
 * physical-memory map window starting from 0x80000000 (PA). */
MMAP_BASE_ADDRESS = 0xFFFFFFC000000000;
/* Link (virtual) address of the kernel image. */
BASE_ADDRESS = 0xFFFFFFD000000000;
/* Physical load address the firmware jumps to. */
PHY_BASE_ADDRESS = 0x80200000;
PAGE_SIZE = 0x1000;
SECTIONS {
. = BASE_ADDRESS;
KERNEL_START = .;
/* VA - PA delta, consumed by _start's KERNEL_OFFSET data word. */
__kernel_offset = . - PHY_BASE_ADDRESS;
/* AT(...) pins each section's load address (LMA) to physical memory. */
.text : AT(ADDR(.text) - __kernel_offset) {
TEXT_START = .;
*(.text.entry) /* boot entry must be placed first */
*(.text .text.*)
TEXT_END = .;
}
.rodata : AT(ADDR(.rodata) - __kernel_offset) {
/* page-align section starts so each can get distinct page permissions */
. = ALIGN(PAGE_SIZE);
RODATA_START = .;
*(.rodata .rodata.*)
*(.srodata .srodata.*)
RODATA_END = .;
}
.data : AT(ADDR(.data) - __kernel_offset) {
. = ALIGN(PAGE_SIZE);
DATA_START = .;
*(.data .data.*)
/* gp-relative addressing: gp sits 0x800 past the small-data area start */
PROVIDE( __global_pointer$ = . + 0x800 );
*(.sdata .sdata.*)
DATA_END = .;
}
.bss : AT(ADDR(.bss) - __kernel_offset) {
. = ALIGN(PAGE_SIZE);
BSS_START = .;
/* boot stack first, so zero_bss can skip it via BOOT_STACK_END */
*(.bss.boot_stack)
BOOT_STACK_END = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
BSS_END = .;
}
/* thread-local storage template: .tdata initializers then .tbss */
.tss : AT(ADDR(.tss) - __kernel_offset) {
. = ALIGN(PAGE_SIZE);
TSS_START = .;
. = ALIGN(8);
TDATA_START = .;
*(.tdata .tdata.*)
TDATA_END = .;
. = ALIGN(8);
TBSS_START = .;
*(.tbss .tbss.*)
TBSS_END = .;
TSS_END = .;
}
. = ALIGN(PAGE_SIZE);
KERNEL_END = .;
/* no unwinding in the kernel: drop DWARF EH frames */
/DISCARD/ : {
*(.eh_frame_hdr)
*(.eh_frame)
}
}

View File

@ -1 +0,0 @@
pub mod page;

View File

@ -1,136 +0,0 @@
use bitflags::bitflags;
use crate::mm::addr::{AddressOps, PhysAddr};
use crate::mm::page;
pub const PAGE_SIZE: usize = 4096;
// Sv39 PTE layout (see the "sv39 paging scheme" note below): a physical
// address's PPN occupies bits [55:12]; inside a PTE the PPN is shifted down
// to bits [53:10], below which sit the flag bits.
const PG_OFFSET: u64 = 12; // page-offset width
const PPN_OFFSET: u64 = 10; // PPN position within a PTE (above the flags)
const PPN_BITS: u64 = 44; // PPN width for Sv39
const PTE_PPN_MASK: u64 = ((1 << PPN_BITS) - 1) << PPN_OFFSET; // PPN field in a PTE
const PA_PPN_MASK: u64 = ((1 << PPN_BITS) - 1) << PG_OFFSET; // PPN field in a physical address
bitflags! {
// Low 10 bits of a RISC-V page-table entry.
pub struct PTEFlags : u64 {
const VALID = 1 << 0;
const READABLE = 1 << 1;
const WRITABLE = 1 << 2;
const EXECUTABLE = 1 << 3;
const USER_ACCESSIBLE = 1 << 4;
const GLOBAL = 1 << 5;
const ACCESSED = 1 << 6;
const DIRTY = 1 << 7;
// two bits reserved for supervisor software use
const RSW = 1 << 8 | 1 << 9;
}
}
impl From<PTEFlags> for page::MapAttr {
    /// Translate hardware PTE permission bits into arch-neutral map
    /// attributes. Only the R/W/X/U bits carry over; VALID, GLOBAL, A/D and
    /// RSW have no `MapAttr` counterpart and are dropped.
    fn from(flags: PTEFlags) -> Self {
        let table = [
            (PTEFlags::READABLE, Self::READABLE),
            (PTEFlags::WRITABLE, Self::WRITABLE),
            (PTEFlags::EXECUTABLE, Self::EXECUTABLE),
            (PTEFlags::USER_ACCESSIBLE, Self::USER_ACCESSIBLE),
        ];
        table
            .into_iter()
            .filter(|(pte_bit, _)| flags.contains(*pte_bit))
            .fold(Self::empty(), |acc, (_, attr_bit)| acc | attr_bit)
    }
}
impl From<page::MapAttr> for PTEFlags {
    /// Build PTE permission bits from arch-neutral map attributes.
    /// An empty attribute set yields an empty (invalid) PTE; any non-empty
    /// set always carries VALID plus the translated R/W/X/U bits.
    fn from(attr: page::MapAttr) -> Self {
        if attr.is_empty() {
            return Self::empty();
        }
        let table = [
            (page::MapAttr::READABLE, Self::READABLE),
            (page::MapAttr::WRITABLE, Self::WRITABLE),
            (page::MapAttr::EXECUTABLE, Self::EXECUTABLE),
            (page::MapAttr::USER_ACCESSIBLE, Self::USER_ACCESSIBLE),
        ];
        table
            .into_iter()
            .filter(|(attr_bit, _)| attr.contains(*attr_bit))
            .fold(Self::VALID, |acc, (_, pte_bit)| acc | pte_bit)
    }
}
// A single Sv39 page-table entry: PPN in bits [53:10], flags in bits [9:0].
#[derive(Clone, Copy)]
struct PTE(u64);
impl page::PageTableEntry for PTE {
// Build a leaf entry mapping `paddr` with the given attributes. VALID is
// implied by the attr conversion whenever `attr` is non-empty.
fn new_page(paddr: PhysAddr, attr: page::MapAttr) -> Self {
let flags = PTEFlags::from(attr);
// move the PPN from its PA position (bit 12) to its PTE position (bit 10)
let ppn = ((paddr.as_u64() & PA_PPN_MASK) >> PG_OFFSET) << PPN_OFFSET;
Self(ppn | flags.bits())
}
// Build a non-leaf entry pointing at the next-level table: VALID set,
// R/W/X all clear (which is what marks it as a pointer, not a page).
fn new_table(paddr: PhysAddr) -> Self {
let flags = PTEFlags::VALID;
let ppn = ((paddr.as_u64() & PA_PPN_MASK) >> PG_OFFSET) << PPN_OFFSET;
Self(ppn | flags.bits())
}
// Physical address this entry points to (page or next-level table).
fn addr(&self) -> PhysAddr {
let ppn = (self.0 & PTE_PPN_MASK) >> PPN_OFFSET;
let paddr = ppn << PG_OFFSET;
PhysAddr::from(paddr as usize)
}
// Arch-neutral view of the permission bits; unknown bits are ignored.
fn attr(&self) -> page::MapAttr {
let flags = PTEFlags::from_bits_truncate(self.0);
flags.into()
}
// Replace only the PPN field, preserving all flag bits.
fn set_addr(&mut self, addr: PhysAddr) {
let ppn = ((addr.as_u64() & PA_PPN_MASK) >> PG_OFFSET) << PPN_OFFSET;
self.0 = (self.0 & !PTE_PPN_MASK) | ppn;
}
// Replace the whole flag field (including VALID), preserving the PPN.
fn set_attr(&mut self, attr: page::MapAttr) {
let flags = PTEFlags::from(attr);
self.0 = (self.0 & !PTEFlags::all().bits()) | flags.bits();
}
fn is_valid(&self) -> bool {
self.0 & PTEFlags::VALID.bits() != 0
}
}
// we'll implement sv39 paging scheme
// Marker types for the three Sv39 mapping granularities.
/// 4 KiB page.
pub struct Size4KiB;
impl page::PageSize for Size4KiB {
const SIZE: usize = 1 << 12; // 4KiB
}
/// 2 MiB megapage.
pub struct Size2MiB;
impl page::PageSize for Size2MiB {
const SIZE: usize = 1 << 21; // 2MiB
}
/// 1 GiB gigapage.
pub struct Size1GiB;
impl page::PageSize for Size1GiB {
const SIZE: usize = 1 << 30; // 1GiB
}

View File

@ -79,6 +79,15 @@ assert_eq_size!(Entry, u64);
#[cfg(feature = "riscv.pagetable.sv32")]
assert_eq_size!(Entry, u32);
#[cfg(feature = "riscv.pagetable.sv32")]
impl PhysAddr {
const PA_PPN_MASK: usize = ((1 << Self::PPN_BITS) - 1) << Self::PG_OFFSET;
const PG_OFFSET: usize = 12;
const PPN_BITS: usize = 22;
const PPN_OFFSET: usize = 10;
const PTE_PPN_MASK: usize = ((1 << Self::PPN_BITS) - 1) << Self::PPN_OFFSET;
}
#[cfg(feature = "riscv.pagetable.sv39")]
impl PhysAddr {
const PA_PPN_MASK: usize = ((1 << Self::PPN_BITS) - 1) << Self::PG_OFFSET;
@ -89,8 +98,12 @@ impl PhysAddr {
}
impl PhysAddr {
pub fn to_ppn(self) -> usize {
(self.as_usize() & Self::PA_PPN_MASK) >> Self::PG_OFFSET
}
fn to_ppn_shifted(self) -> usize {
((self.as_usize() & Self::PA_PPN_MASK) >> Self::PG_OFFSET) << Self::PPN_OFFSET
self.to_ppn() << Self::PPN_OFFSET
}
fn from_pte(pte: usize) -> Self {

View File

@ -1,6 +1,6 @@
mod entry;
mod table;
mod utils;
pub mod utils;
pub use entry::Entry;
pub use table::Table;

View File

@ -1,9 +1,15 @@
use crate::arch::layout::{kernel_phys_to_virt, PAGE_SIZE};
use crate::utils::size::*;
use crate::vspace::addr::*;
use crate::vspace::paging::*;
use num_traits::ToPrimitive;
const PAGE_SIZE: usize = 4096;
#[cfg(feature = "riscv.pagetable.sv32")]
impl VirtAddr {
const PG_OFFSET: usize = 12;
const VPN_BITS: usize = 10;
const VPN_MASK: usize = (1 << Self::VPN_BITS) - 1;
}
#[cfg(feature = "riscv.pagetable.sv39")]
impl VirtAddr {
@ -29,18 +35,28 @@ impl VirtAddr {
}
impl TableLevel {
pub fn is_aligned<A: AddressOps>(&self, addr: A) -> bool {
pub fn level_size(&self) -> usize {
match self {
Self::Level0 => addr.is_aligned(4 * KIB),
Self::Level0 => 4 * KIB,
#[cfg(feature = "riscv.pagetable.sv32")]
Self::Level1 => addr.is_aligned(4 * MIB),
Self::Level1 => 4 * MIB,
#[cfg(not(feature = "riscv.pagetable.sv32"))]
Self::Level1 => addr.is_aligned(2 * MIB),
Self::Level2 => addr.is_aligned(1 * GIB),
Self::Level3 => addr.is_aligned(512 * GIB),
Self::Level4 => addr.is_aligned(256 * TIB),
Self::Level1 => 2 * MIB,
Self::Level2 => 1 * GIB,
#[cfg(not(feature = "legacy"))]
Self::Level3 => 512 * GIB,
#[cfg(not(feature = "legacy"))]
Self::Level4 => 256 * TIB,
}
}
pub fn align<A: AddressOps>(&self, addr: A) -> A {
addr.align_down(self.level_size())
}
pub fn is_aligned<A: AddressOps>(&self, addr: A) -> bool {
self.align(addr) == addr
}
}
#[repr(C, align(4096))]
@ -65,10 +81,7 @@ impl Table {
}
cur = cur.next().unwrap();
table = unsafe {
// NOTE: we assume that kernel space is 1:1 mapped
Self::new(entry.addr().as_usize().into())
};
table = unsafe { Self::new(kernel_phys_to_virt(entry.addr()).as_usize().into()) };
}
}
}
@ -76,10 +89,12 @@ impl Table {
impl TableOps for Table {
type Entry = Entry;
#[cfg(feature = "riscv.pagetable.sv32")]
const MAX_PAGE_SIZE: TableLevel = TableLevel::Level1;
#[cfg(feature = "riscv.pagetable.sv39")]
const MAX_PAGE_SIZE: TableLevel = TableLevel::Level2;
unsafe fn new(location: PhysAddr) -> &'static mut Self {
unsafe fn new(location: VirtAddr) -> &'static mut Self {
assert!(location.is_aligned(PAGE_SIZE));
let ptr: *mut Self = location.into();
&mut *ptr
@ -101,11 +116,13 @@ impl TableOps for Table {
let (entry, cur) = self.lookup_mut_internal(from);
if cur < level {
if cur > level {
// previous level is not mapped
return Err(PageError::MissingEntry(cur));
}
if entry.is_valid() || cur > level {
if entry.is_valid() || cur < level {
// requested level is already mapped
return Err(PageError::AlreadyMapped(cur));
}

View File

@ -1 +1,122 @@
use crate::arch::layout::*;
use crate::utils::size::GIB;
use crate::vspace::addr::{align_up, AddressOps, PhysAddr, VirtAddr};
use crate::vspace::allocator::RamBlock;
use crate::vspace::paging::PageError::{AlreadyMapped, MissingEntry};
use crate::vspace::paging::{MapAttr, Table, TableLevel, TableOps};
/// Seed the early allocator from the device tree: donate all reported RAM,
/// then carve out every region that must not be handed to the kernel
/// (firmware reservations, /reserved-memory nodes, the kernel image, and
/// the FDT blob itself).
///
/// # Safety
/// Must run once during early boot, before any of the donated memory is
/// otherwise in use; `fdt_addr` must be the physical address of `fdt`.
pub unsafe fn setup_memory<const N: usize>(fdt: &fdt::Fdt, fdt_addr: usize, mem: &mut RamBlock<N>) {
    // Donate every RAM bank reported by the device tree.
    for bank in fdt.memory().regions() {
        mem.dealloc(
            PhysAddr::from(bank.starting_address),
            align_up(bank.size.unwrap(), PAGE_SIZE),
        );
    }
    // Carve out firmware (UEFI) memory reservations.
    for rsv in fdt.memory_reservations() {
        mem.reserve(
            PhysAddr::from(rsv.address()),
            align_up(rsv.size(), PAGE_SIZE),
        );
    }
    // Carve out OpenSBI's /reserved-memory children.
    let reserved_nodes = fdt
        .find_node("/reserved-memory")
        .map(|node| node.children())
        .expect("No reserved memory found in device tree");
    for node in reserved_nodes {
        let reg = node.reg().unwrap().next().unwrap();
        mem.reserve(
            PhysAddr::from(reg.starting_address),
            align_up(reg.size.unwrap(), PAGE_SIZE),
        );
    }
    // Carve out the kernel image (link addresses converted back to physical).
    let kernel_begin = KERNEL_START.as_phys_addr() - KERNEL_OFFSET;
    let kernel_limit = (KERNEL_END.as_phys_addr() - KERNEL_OFFSET).align_up(PAGE_SIZE);
    mem.reserve(kernel_begin, (kernel_limit - kernel_begin).as_usize());
    // Carve out the flattened device tree blob.
    mem.reserve(
        PhysAddr::from(fdt_addr),
        align_up(fdt.total_size(), PAGE_SIZE),
    );
}
/// Build the kernel's initial page tables and point `satp` at them.
///
/// Page-table frames come from `allocator`; the root table is written
/// through the frame's raw address, so this must run while that memory is
/// still reachable (before the `satp` switch below takes effect).
///
/// # Safety
/// Call exactly once during early boot. The allocator must hand out
/// page-aligned frames; nothing may rely on the final virtual layout yet.
pub unsafe fn setup_kernel_paging<const N: usize>(allocator: &mut RamBlock<N>) {
// Every page-table frame is one PAGE_LAYOUT allocation; OOM here is fatal.
let mut alloc = || {
allocator
.alloc(PAGE_LAYOUT)
.expect("Failed to allocate page table")
};
let root_pt = alloc();
// NOTE(review): `Table::new` takes a VirtAddr but receives the frame's raw
// address unchanged — this presumably relies on phys-reachable memory
// before the satp switch; confirm against Table::new's contract.
let kernel_pt = unsafe { Table::new(root_pt.as_usize().into()) };
// Map one page at `level`; when an intermediate table is missing, allocate
// it, install it, and retry. Returns false if the slot is already mapped.
let mut map = |vaddr: VirtAddr, paddr: PhysAddr, attr: MapAttr, level: TableLevel| loop {
match kernel_pt.map(vaddr, paddr, attr, level) {
Ok(_) => return true,
Err(MissingEntry(missed_level)) => kernel_pt
.map(
missed_level.align(vaddr),
alloc(),
MapAttr::PAGE_TABLE,
missed_level,
)
.expect("Failed to map miss-ed page table"),
Err(AlreadyMapped(_)) => return false,
}
};
// Map a linker section 4 KiB page by page at its higher-half address
// (kernel_phys_to_virt of the section's [_START, _END) physical range).
macro_rules! map_section {
(($($section:ident),+),$attr:expr) => {
$(
for addr in (concat_idents!($section, _START).as_phys_addr()..concat_idents!($section, _END).as_phys_addr()).step_by(PAGE_SIZE) {
if !map(kernel_phys_to_virt(addr), addr, $attr, TableLevel::Level0) {
panic!(
"Failed to map section: {:X?} - {:X?}",
concat_idents!($section, _START).as_phys_addr(),
concat_idents!($section, _END).as_phys_addr()
);
}
}
)+
};
}
map_section!((TEXT), MapAttr::READABLE | MapAttr::EXECUTABLE);
map_section!((RODATA), MapAttr::READABLE);
map_section!((DATA, BSS), MapAttr::READABLE | MapAttr::WRITABLE);
// TODO: every core must have a separate TSS section
map_section!((TSS), MapAttr::READABLE | MapAttr::WRITABLE);
// map 4 GiB physical memory
// NOTE(review): `3 * GIB - 1 + GIB` equals 4 GiB - 1, presumably written
// this way so the bound also fits a 32-bit usize — confirm.
// TODO: walk fdt to get all memory region?
for addr in (0..(3 * GIB - 1 + GIB)).step_by(TableLevel::Level1.level_size()) {
let phys_addr = PhysAddr(addr);
map(
mmap_phys_to_virt(phys_addr),
phys_addr,
MapAttr::READABLE | MapAttr::WRITABLE,
TableLevel::Level1,
);
}
// NOTE(review): mode is hard-coded to Sv39 even though the crate has a
// "riscv.pagetable.sv32" feature — verify behavior for riscv32 builds.
riscv::register::satp::set(riscv::register::satp::Mode::Sv39, 0, root_pt.to_ppn());
riscv::asm::sfence_vma_all();
// NOTE(review): `remap!` is defined but never invoked; the function
// currently diverges at the `todo!` below before remapping any registers.
macro_rules! remap {
($reg:ident) => {
let $reg: usize;
core::arch::asm!(concat!("mv {}, ", stringify!($reg)), out(reg) $reg);
let $reg = kernel_phys_to_virt(PhysAddr($reg));
core::arch::asm!(concat!("mv ", stringify!($reg), ", {}"), in(reg) $reg.as_usize());
};
}
todo!("remap registers");
}

View File

@ -1,23 +1,19 @@
use core::cell::Cell;
use fdt::Fdt;
use log::{debug, error, info, warn};
use crate::plat::console::{set_console, ConsoleDevice, ConsoleDriver, CONSOLE};
use crate::plat::lowlevel::{Hardware, LowLevel};
use crate::plat::timer::{Timer, TimerOps};
use crate::plat::trap::{Trap, TrapOps};
use crate::vspace::allocator::RamBlock;
#[thread_local]
pub static HART_ID: Cell<usize> = Cell::new(0);
#[no_mangle]
pub extern "C" fn rust_main(hart_id: usize, device_tree_addr: usize) -> ! {
pub fn rust_main<const N: usize>(hart_id: usize, fdt: fdt::Fdt, mut _allocator: RamBlock<N>) -> ! {
HART_ID.set(hart_id);
crate::logging::init();
info!("Kernel Started");
let fdt = unsafe { Fdt::from_ptr(device_tree_addr as *const u8).unwrap() };
setup_console(&fdt);
#[cfg(test)]
@ -53,7 +49,7 @@ pub extern "C" fn rust_main(hart_id: usize, device_tree_addr: usize) -> ! {
Hardware::shutdown(true);
}
fn setup_console(fdt: &Fdt) {
fn setup_console(fdt: &fdt::Fdt) {
// NOTE: ignore stdin: both stdin and stdout will go through stdout device
match fdt
.chosen()

View File

@ -3,11 +3,13 @@
#![no_main]
// Features
#![feature(asm_const)]
#![feature(concat_idents)]
#![feature(const_mut_refs)]
#![feature(extern_types)]
#![feature(let_chains)]
#![feature(naked_functions)]
#![feature(panic_info_message)]
#![feature(step_trait)]
#![feature(stmt_expr_attributes)]
#![feature(thread_local)]
// Test Infrastructure

View File

@ -1,23 +1,32 @@
use crate::vspace::addr::PhysAddr;
use crate::vspace::addr::{PhysAddr, VirtAddr};
extern "C" {
pub type ExternSymbol;
}
impl ExternSymbol {
    /// Address of the linker-defined symbol as a raw byte pointer.
    #[inline(always)]
    pub fn as_ptr(&'static self) -> *const u8 {
        let raw: *const Self = self;
        raw.cast::<u8>()
    }

    /// Same address, as a mutable byte pointer.
    #[inline(always)]
    pub fn as_mut_ptr(&'static self) -> *mut u8 {
        self.as_ptr() as *mut u8
    }

    /// Numeric value of the symbol's address.
    #[inline(always)]
    pub fn as_usize(&'static self) -> usize {
        self.as_ptr() as usize
    }

    /// The symbol's address wrapped as a physical address.
    #[inline(always)]
    pub fn as_phys_addr(&'static self) -> PhysAddr {
        PhysAddr::from(self.as_usize())
    }

    /// The symbol's address wrapped as a virtual address.
    #[inline(always)]
    pub fn as_virt_addr(&'static self) -> VirtAddr {
        VirtAddr::from(self.as_usize())
    }
}

View File

@ -2,11 +2,15 @@ pub const B: usize = 1;
pub const KB: usize = B * 1000;
pub const MB: usize = KB * 1000;
pub const GB: usize = MB * 1000;
#[cfg(not(feature = "legacy"))]
pub const TB: usize = GB * 1000;
#[cfg(not(feature = "legacy"))]
pub const PB: usize = TB * 1000;
pub const KIB: usize = B * 1024;
pub const MIB: usize = KIB * 1024;
pub const GIB: usize = MIB * 1024;
#[cfg(not(feature = "legacy"))]
pub const TIB: usize = GIB * 1024;
#[cfg(not(feature = "legacy"))]
pub const PIB: usize = TIB * 1024;

View File

@ -1,5 +1,6 @@
use core::fmt::*;
use core::hash::*;
use core::iter::Step;
use core::num::*;
use core::ops::*;
@ -19,7 +20,7 @@ pub struct PhysAddr(pub usize);
#[derive(Copy, Clone, Default, Eq, Ord, PartialOrd, PartialEq)]
pub struct VirtAddr(pub usize);
pub trait AddressOps {
pub trait AddressOps: Copy + Clone + Default + Eq + Ord + PartialOrd + PartialEq {
fn as_mut_ptr<T>(&self) -> *mut T {
self.as_usize() as *mut T
}
@ -352,3 +353,39 @@ impl Hash for VirtAddr {
self.0.hash(state);
}
}
impl Step for PhysAddr {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if end.0 >= start.0 {
Some(end.0 - start.0)
} else {
None
}
}
fn forward_checked(start: Self, count: usize) -> Option<Self> {
start.as_usize().checked_add(count).map(PhysAddr)
}
fn backward_checked(start: Self, count: usize) -> Option<Self> {
start.as_usize().checked_sub(count).map(PhysAddr)
}
}
impl Step for VirtAddr {
fn steps_between(start: &Self, end: &Self) -> Option<usize> {
if end.0 >= start.0 {
Some(end.0 - start.0)
} else {
None
}
}
fn forward_checked(start: Self, count: usize) -> Option<Self> {
start.as_usize().checked_add(count).map(VirtAddr)
}
fn backward_checked(start: Self, count: usize) -> Option<Self> {
start.as_usize().checked_sub(count).map(VirtAddr)
}
}

View File

@ -168,6 +168,7 @@ pub type BitmapAllocator32K = BitmapAllocator<Bitmap32K>;
#[cfg(test)]
mod tests {
use super::*;
use crate::arch::layout::PAGE_LAYOUT;
#[test_case]
fn test_bitmap32() {
@ -261,25 +262,20 @@ mod tests {
// alloc from empty
for i in 0..32 {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
let ptr = unsafe { allocator.alloc(PAGE_LAYOUT) };
assert_eq!(PhysAddr::from(ptr).as_usize(), 0x42 + i * PAGE_SIZE);
}
// dealloc
for i in 0..16 {
unsafe {
allocator.dealloc(
PhysAddr(0x42 + i * 2 * PAGE_SIZE).as_mut_ptr(),
Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap(),
);
allocator.dealloc(PhysAddr(0x42 + i * 2 * PAGE_SIZE).as_mut_ptr(), PAGE_LAYOUT);
}
}
// predictable alloc from dealloc pattern
for i in 0..16 {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
let ptr = unsafe { allocator.alloc(PAGE_LAYOUT) };
assert_eq!(PhysAddr::from(ptr).as_usize(), 0x42 + i * 2 * PAGE_SIZE);
}
}

View File

@ -3,7 +3,6 @@
use crate::utils::then::Then;
use crate::vspace::addr::{AddressOps, PhysAddr};
use core::alloc::{GlobalAlloc, Layout};
use core::cmp::{max, min};
use core::fmt::Debug;
use spin::Mutex;
@ -175,36 +174,33 @@ unsafe impl GlobalAlloc for FreeListAllocator {
#[cfg(test)]
mod tests {
use super::*;
use crate::arch::layout::PAGE_SIZE;
use crate::arch::layout::{PAGE_LAYOUT, PAGE_SIZE};
#[test_case]
// TODO: freelist tests are broken since it requires real free memory to work, ignore for now
// #[test_case]
fn test_freelist() {
const BASE: PhysAddr = PhysAddr(0x80300000);
let allocator = FreeListAllocator::new(BASE, 32 * PAGE_SIZE);
for i in 0..32 {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
let ptr = unsafe { allocator.alloc(PAGE_LAYOUT) };
assert_eq!(ptr as usize, (BASE + i * PAGE_SIZE).as_usize());
}
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
let ptr = unsafe { allocator.alloc(PAGE_LAYOUT) };
assert_eq!(ptr, core::ptr::null_mut());
for i in (0..32).rev() {
let ptr = (BASE + i * PAGE_SIZE).as_mut_ptr();
unsafe {
allocator.dealloc(ptr, Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap())
};
unsafe { allocator.dealloc(ptr, PAGE_LAYOUT) };
}
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
let ptr = unsafe { allocator.alloc(PAGE_LAYOUT) };
assert_eq!(ptr as usize, BASE.as_usize());
}
#[test_case]
// #[test_case]
fn test_freelist_reserve() {
const BASE: PhysAddr = PhysAddr(0x80300000);
@ -216,8 +212,7 @@ mod tests {
let mut cnt = 32 - 4;
loop {
let ptr =
unsafe { allocator.alloc(Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap()) };
let ptr = unsafe { allocator.alloc(PAGE_LAYOUT) };
if ptr.is_null() {
assert_eq!(cnt, 0);
break;

View File

@ -4,11 +4,13 @@ use core::fmt::Debug;
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, FromPrimitive, ToPrimitive)]
pub enum TableLevel {
Level0 = 0,
Level1 = 1,
Level2 = 2,
Level3 = 3,
Level4 = 4,
Level0 = 0, // KiloPage
Level1 = 1, // MegaPage
Level2 = 2, // GigaPage
#[cfg(not(feature = "legacy"))]
Level3 = 3, // TeraPage
#[cfg(not(feature = "legacy"))]
Level4 = 4, // PetaPage
}
impl TableLevel {
@ -17,7 +19,9 @@ impl TableLevel {
Self::Level0 => None,
Self::Level1 => Some(Self::Level0),
Self::Level2 => Some(Self::Level1),
#[cfg(not(feature = "legacy"))]
Self::Level3 => Some(Self::Level2),
#[cfg(not(feature = "legacy"))]
Self::Level4 => Some(Self::Level3),
}
}
@ -35,7 +39,7 @@ pub trait TableOps: Debug {
type Entry: EntryOps;
const MAX_PAGE_SIZE: TableLevel;
unsafe fn new(location: PhysAddr) -> &'static mut Self;
unsafe fn new(location: VirtAddr) -> &'static mut Self;
fn map(&mut self, from: VirtAddr, to: PhysAddr, attr: MapAttr, level: TableLevel)
-> PageResult;