Compare commits


6 Commits

18 changed files with 314 additions and 181 deletions

View File

@ -10,6 +10,7 @@ jobs:
build:
strategy:
matrix:
mode: [ debug, release ]
include:
- arch: riscv64
qemu-pkg: qemu-system-misc
@ -36,10 +37,10 @@ jobs:
components: clippy, rust-src
- name: Run Clippy
run: make clippy ARCH=${{ matrix.arch }}
run: make clippy ARCH=${{ matrix.arch }} MODE=${{ matrix.mode }}
- name: Test
run: make test ARCH=${{ matrix.arch }}
run: make test ARCH=${{ matrix.arch }} MODE=${{ matrix.mode }}
- name: Build
run: make kernel ARCH=${{ matrix.arch }}
run: make kernel ARCH=${{ matrix.arch }} MODE=${{ matrix.mode }}
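With the added mode axis, each architecture entry in the matrix is expanded against both build modes, so the riscv64 job now runs Clippy, the tests and the kernel build once with MODE=debug and once with MODE=release, forwarding the mode to the new Makefile variable.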

Cargo.lock (generated)
View File

@ -106,6 +106,7 @@ dependencies = [
"sbi-rt",
"spin",
"static_assertions",
"tracer",
"uapi",
"uart_16550",
"utils",
@ -246,6 +247,14 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "tracer"
version = "0.1.0"
dependencies = [
"quote",
"syn",
]
[[package]]
name = "uapi"
version = "0.1.0"

View File

@ -1,4 +1,5 @@
ARCH ?= riscv64
MODE ?= debug
ifeq ($(ARCH), riscv64)
BUILD_TARGET := riscv64imac-unknown-none-elf
@ -7,6 +8,12 @@ else
$(error Invalid TARGET: $(ARCH))
endif
ifeq ($(MODE), release)
CARGO_BUILD_ARGS += --release
else ifneq ($(MODE), debug)
$(error Invalid MODE: $(MODE))
endif
RUSTFLAGS += -Crelocation-model=static
RUSTFLAGS += -Ccode-model=medium
#RUSTFLAGS += -Ctarget-feature=+relax
@ -19,6 +26,13 @@ CARGO_BUILD_ARGS += -Zbuild-std=core,compiler_builtins,alloc
CARGO_BUILD_ARGS += -Zbuild-std-features=compiler-builtins-mem
CARGO_BUILD_ARGS += $(CARGO_TARGET_ARGS)
QEMU = qemu-system-$(ARCH)
QEMU_ARGS = -nographic -serial mon:stdio -smp 1
QEMU_ARGS += -machine virt # TODO: override by $(BOARD)
QEMU_ARGS += -kernel target/$(BUILD_TARGET)/$(MODE)/kernel
QEMU_ARGS += -initrd build/init.cpio
kernel:
env RUSTFLAGS="$(RUSTFLAGS)" cargo build --bin kernel $(CARGO_BUILD_ARGS)
@ -36,7 +50,7 @@ kernel-test-dump:
@env RUSTFLAGS="$(RUSTFLAGS)" cargo test $(CARGO_TARGET_ARGS) --bin=kernel --no-run --message-format=json
kernel-asm: kernel
riscv64-elf-objdump -d target/$(BUILD_TARGET)/debug/kernel | c++filt -t > kernel.asm
riscv64-elf-objdump -d target/$(BUILD_TARGET)/$(MODE)/kernel | c++filt -t > kernel.asm
build-target:
@echo $(BUILD_TARGET)
@ -45,32 +59,9 @@ clean:
cargo clean
qemu: kernel
qemu-system-$(ARCH) \
-nographic -machine virt -serial mon:stdio -smp 1 \
-kernel target/$(BUILD_TARGET)/debug/kernel
$(QEMU) $(QEMU_ARGS)
qemu-gdb: kernel
qemu-system-$(ARCH) \
-nographic -machine virt -serial mon:stdio -smp 1 \
-kernel target/$(BUILD_TARGET)/debug/kernel \
-s -S
$(QEMU) $(QEMU_ARGS) -s -S
replay.qcow2:
qemu-img create -f qcow2 replay.qcow2 1G
qemu-record: kernel replay.qcow2
qemu-system-$(ARCH) \
-nographic -machine virt -serial mon:stdio -smp 1 \
-kernel target/$(BUILD_TARGET)/debug/kernel \
-icount shift=auto,rr=record,rrfile=replay.bin,rrsnapshot=init \
-drive file=replay.qcow2,if=none,id=rr
qemu-replay: kernel replay.qcow2
qemu-system-$(ARCH) \
-nographic -machine virt -serial mon:stdio -smp 1 \
-kernel target/$(BUILD_TARGET)/debug/kernel \
-icount shift=auto,rr=replay,rrfile=replay.bin,rrsnapshot=init \
-drive file=replay.qcow2,if=none,id=rr \
-s -S
.PHONY: kernel clippy test kernel-test-dump kernel-asm build-target clean qemu qemu-gdb qemu-record qemu-replay
.PHONY: kernel clippy test kernel-test-dump kernel-asm build-target clean qemu qemu-gdb
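MODE defaults to debug; MODE=release only adds --release to CARGO_BUILD_ARGS, and every path that used target/$(BUILD_TARGET)/debug now goes through target/$(BUILD_TARGET)/$(MODE), so for example make qemu ARCH=riscv64 MODE=release boots the release kernel, while any other MODE value stops the build at the $(error Invalid MODE) guard.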

View File

@ -1,10 +1,10 @@
use super::super::lowlevel::{ArchLL, ArchLLOps};
use super::TEST_DEVICE;
use crate::arch::layout::mmap_phys_to_virt;
use crate::arch::vspace::is_kernel_pagetable_installed;
use crate::arch::vspace::is_kernel_pagetable_allocated;
unsafe fn get_qemu_test_device() -> *mut u32 {
if is_kernel_pagetable_installed() {
if is_kernel_pagetable_allocated() {
mmap_phys_to_virt(TEST_DEVICE.into()).into()
} else {
TEST_DEVICE

View File

@ -1,3 +1,4 @@
use super::vspace::install_kernel_pagetable;
use crate::arch::init_early_console;
use crate::arch::layout::{mmap_phys_to_virt, zero_bss};
use crate::arch::vspace::{setup_kernel_paging, setup_memory};
@ -71,6 +72,8 @@ unsafe fn pre_main(hart_id: usize, fdt_addr: usize) {
setup_memory(fdt_addr);
setup_kernel_paging();
// TODO: on secondary cpu, we should copy existing kernel page table and TSS, then remap TSS
install_kernel_pagetable();
// after kernel paging, board level early console is broken (no address mapping)
mute_console();

View File

@ -0,0 +1,135 @@
use super::{alloc_page, install_pagetable, map_range, RAM_ALLOCATOR};
use crate::{arch::layout::*, vspace::*};
use log::{debug, info};
use spin::Mutex;
use utils::{addr::*, size::*};
#[cfg(debug_assertions)]
use super::ALLOC_COUNT;
#[thread_local]
static KERNEL_PAGETABLE: Mutex<Option<Table<Level0>>> = Mutex::new(None);
pub const KERNEL_PAGETABLE_SIZE: usize = 3;
pub unsafe fn setup_memory(fdt_addr: usize) {
info!("Setting up memory");
let fdt = unsafe { fdt::Fdt::from_ptr(fdt_addr as *const u8).unwrap() };
let mut mem = RAM_ALLOCATOR.lock();
// Add main memory regions to allocator
for region in fdt.memory().regions() {
debug!(
"Adding free memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
mem.dealloc(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
}
// Exclude memory occupied by UEFI
for region in fdt.memory_reservations() {
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.address(),
region.size()
);
mem.reserve(PhysAddr::from(region.address()), align_up(region.size(), PAGE_SIZE));
}
// Exclude memory occupied by OpenSBI
let reserved_memory = fdt
.find_node("/reserved-memory")
.map(|node| node.children())
.expect("No reserved memory found in device tree");
for child in reserved_memory {
let region = child.reg().unwrap().next().unwrap();
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
mem.reserve(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
}
// Exclude kernel memory
let kernel_start = kernel_virt_to_phys(KERNEL_START.as_virt_addr());
let kernel_end = kernel_virt_to_phys(KERNEL_END.as_virt_addr()).align_up(PAGE_SIZE);
debug!("Reserving kernel memory:\tstart: {:x?}, end: {:x?}", kernel_start, kernel_end);
mem.reserve(kernel_start, (kernel_end - kernel_start).as_usize());
// Exclude FDT table
let fdt_addr = PhysAddr::from(fdt_addr);
let fdt_size = align_up(fdt.total_size(), PAGE_SIZE);
debug!("Reserving FDT memory:\tstart: {:x?}, size: {:x?}", fdt_addr, fdt_size);
mem.reserve(fdt_addr, fdt_size);
}
pub unsafe fn setup_kernel_paging() {
info!("Setting up kernel paging");
assert!(!is_kernel_pagetable_allocated(), "Kernel pagetable already allocated");
let root_pt = alloc_page();
let mut kernel_pt = Table::<Level0>::new(root_pt.as_usize().into());
macro_rules! map_section {
(($($section:ident),+),$attr:expr) => {
$(
let start = concat_idents!($section, _START).as_virt_addr().align_down(PAGE_SIZE);
let end = concat_idents!($section, _END).as_virt_addr().align_up(PAGE_SIZE);
debug!("Mapping section {}:\t[{:?}, {:?}]", stringify!($section), start, end);
map_range(&mut kernel_pt, start, kernel_virt_to_phys(start), (end - start).as_usize(), $attr);
)+
};
}
map_section!((TEXT), MapAttr::READABLE | MapAttr::EXECUTABLE);
map_section!((RODATA), MapAttr::READABLE);
map_section!((DATA, BSS), MapAttr::READABLE | MapAttr::WRITABLE);
// TODO: every core must have a separate TSS section
map_section!((TSS), MapAttr::READABLE | MapAttr::WRITABLE);
// map 4 GiB physical memory
// TODO: walk fdt to get all memory region? put it in driver init
map_range(
&mut kernel_pt,
mmap_phys_to_virt(PhysAddr(0)),
PhysAddr(0),
4 * GIB, // NOTE: will fail on 32-bit system
MapAttr::READABLE | MapAttr::WRITABLE,
);
#[cfg(debug_assertions)]
{
let count = ALLOC_COUNT.load(core::sync::atomic::Ordering::Acquire);
log::trace!("Kernel page table size: {:?}", count);
assert!(KERNEL_PAGETABLE_SIZE == count, "Kernel page table size mismatch");
}
*KERNEL_PAGETABLE.lock() = Some(kernel_pt);
}
pub unsafe fn install_kernel_pagetable() {
info!("Setting up new kernel pagetable");
let kernel_pt = KERNEL_PAGETABLE.lock();
let kernel_pt = kernel_pt.as_ref().expect("No kernel pagetable found");
install_pagetable(kernel_pt)
}
#[inline]
pub fn is_kernel_pagetable_allocated() -> bool {
KERNEL_PAGETABLE.lock().is_some()
}
unsafe fn map_kernel(from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
let mut guard = KERNEL_PAGETABLE.lock();
let pt = guard.as_mut().unwrap();
map_range(pt, from, to, size, attr);
}
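Taken together with the pre_main hunk above, the boot-time order this split implies is: reserve memory from the FDT, build the kernel page table, then switch to it. A minimal sketch of that sequence, using the names from this diff:

    unsafe {
        setup_memory(fdt_addr);        // seed RAM_ALLOCATOR with free RAM; reserve UEFI, OpenSBI, kernel and FDT ranges
        setup_kernel_paging();         // allocate a root table and map TEXT/RODATA/DATA/BSS/TSS plus 4 GiB of physical memory
        install_kernel_pagetable();    // take the cached KERNEL_PAGETABLE and hand it to install_pagetable()
    }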

View File

@ -1,7 +1,9 @@
mod addr;
mod entry;
mod kmem;
mod level;
mod table;
mod utils;
pub use kmem::*;
pub use utils::*;

View File

@ -1,75 +1,23 @@
use core::panic;
use crate::{arch::layout::*, vspace::*};
use allocator::RamBlock;
use log::{debug, info};
use core::panic;
use log::debug;
use spin::Mutex;
use utils::{addr::*, size::GIB};
use utils::addr::*;
#[thread_local]
static KERNEL_PAGETABLE: Mutex<Option<Table<Level0>>> = Mutex::new(None);
pub static RAM_ALLOCATOR: Mutex<RamBlock<8>> = Mutex::new(RamBlock::new());
pub static KERNEL_ALLOCATOR: Mutex<RamBlock<8>> = Mutex::new(RamBlock::new());
#[cfg(debug_assertions)]
pub static ALLOC_COUNT: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0);
pub unsafe fn setup_memory(fdt_addr: usize) {
info!("Setting up memory");
let fdt = unsafe { fdt::Fdt::from_ptr(fdt_addr as *const u8).unwrap() };
let mut mem = KERNEL_ALLOCATOR.lock();
// Add main memory regions to allocator
for region in fdt.memory().regions() {
debug!(
"Adding free memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
mem.dealloc(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
#[cfg(debug_assertions)]
fn alloc_callback() {
let cnt = ALLOC_COUNT.fetch_add(1, core::sync::atomic::Ordering::SeqCst);
}
// Exclude memory occupied by UEFI
for region in fdt.memory_reservations() {
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.address(),
region.size()
);
mem.reserve(PhysAddr::from(region.address()), align_up(region.size(), PAGE_SIZE));
}
// Exclude memory occupied by OpenSBI
let reserved_memory = fdt
.find_node("/reserved-memory")
.map(|node| node.children())
.expect("No reserved memory found in device tree");
for child in reserved_memory {
let region = child.reg().unwrap().next().unwrap();
debug!(
"Reserving memory:\tstart: {:x?}, size: {:x?}",
region.starting_address, region.size
);
mem.reserve(
PhysAddr::from(region.starting_address),
align_up(region.size.unwrap(), PAGE_SIZE),
);
}
// Exclude kernel memory
let kernel_start = kernel_virt_to_phys(KERNEL_START.as_virt_addr());
let kernel_end = kernel_virt_to_phys(KERNEL_END.as_virt_addr()).align_up(PAGE_SIZE);
debug!("Reserving kernel memory:\tstart: {:x?}, end: {:x?}", kernel_start, kernel_end);
mem.reserve(kernel_start, (kernel_end - kernel_start).as_usize());
// Exclude FDT table
let fdt_addr = PhysAddr::from(fdt_addr);
let fdt_size = align_up(fdt.total_size(), PAGE_SIZE);
debug!("Reserving FDT memory:\tstart: {:x?}, size: {:x?}", fdt_addr, fdt_size);
mem.reserve(fdt_addr, fdt_size);
}
fn alloc_page() -> PhysAddr {
let addr = KERNEL_ALLOCATOR.lock().alloc(PAGE_LAYOUT).expect("Failed to allocate page");
#[cfg_attr(debug_assertions, tracer::trace_callback(log = true, callback = alloc_callback))]
pub fn alloc_page() -> PhysAddr {
let addr = RAM_ALLOCATOR.lock().alloc(PAGE_LAYOUT).expect("Failed to allocate page");
unsafe {
// zero page
@ -80,7 +28,7 @@ fn alloc_page() -> PhysAddr {
addr
}
fn map_range(pt: &mut Table<Level0>, from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
pub fn map_range(pt: &mut Table<Level0>, from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
let mut virt_start = from;
let mut phys_start = to;
let virt_end = from + size;
@ -132,18 +80,7 @@ fn map_range(pt: &mut Table<Level0>, from: VirtAddr, to: PhysAddr, size: usize,
}
}
pub unsafe fn map(from: VirtAddr, to: PhysAddr, size: usize, attr: MapAttr) {
let mut guard = KERNEL_PAGETABLE.lock();
let pt = guard.as_mut().unwrap();
map_range(pt, from, to, size, attr);
}
#[inline]
pub fn is_kernel_pagetable_installed() -> bool {
KERNEL_PAGETABLE.lock().is_some()
}
pub fn page_table_mode() -> riscv::register::satp::Mode {
fn page_table_mode() -> riscv::register::satp::Mode {
#[cfg(feature = "riscv.pagetable.sv39")]
return riscv::register::satp::Mode::Sv39;
#[cfg(feature = "riscv.pagetable.sv48")]
@ -152,46 +89,9 @@ pub fn page_table_mode() -> riscv::register::satp::Mode {
return riscv::register::satp::Mode::Sv57;
}
pub unsafe fn setup_kernel_paging() {
info!("Setting up kernel paging");
assert!(!is_kernel_pagetable_installed(), "Kernel pagetable already installed");
pub unsafe fn install_pagetable(pt: &Table<Level0>) {
let root_pt = PhysAddr::from(pt.entries as *const _ as *const usize);
let root_pt = alloc_page();
let mut kernel_pt = Table::<Level0>::new(root_pt.as_usize().into());
macro_rules! map_section {
(($($section:ident),+),$attr:expr) => {
$(
let start = concat_idents!($section, _START).as_virt_addr().align_down(PAGE_SIZE);
let end = concat_idents!($section, _END).as_virt_addr().align_up(PAGE_SIZE);
debug!("Mapping section {}:\t[{:?}, {:?}]", stringify!($section), start, end);
map_range(&mut kernel_pt, start, kernel_virt_to_phys(start), (end - start).as_usize(), $attr);
)+
};
}
map_section!((TEXT), MapAttr::READABLE | MapAttr::EXECUTABLE);
map_section!((RODATA), MapAttr::READABLE);
map_section!((DATA, BSS), MapAttr::READABLE | MapAttr::WRITABLE);
// TODO: every core must have a separate TSS section
map_section!((TSS), MapAttr::READABLE | MapAttr::WRITABLE);
// map 4 GiB physical memory
// TODO: walk fdt to get all memory region? put it in driver init
map_range(
&mut kernel_pt,
mmap_phys_to_virt(PhysAddr(0)),
PhysAddr(0),
4 * GIB, // NOTE: will fail on 32-bit system
MapAttr::READABLE | MapAttr::WRITABLE,
);
// setup new pagetable
debug!("Setting up new kernel pagetable");
riscv::register::satp::set(page_table_mode(), 0, root_pt.extract_ppn());
riscv::asm::sfence_vma_all();
// switch to virtual address
*KERNEL_PAGETABLE.lock() = Some(kernel_pt);
}
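The rest of install_pagetable is cut off by the hunk, but the satp write and fence dropped from the old setup_kernel_paging presumably move into it; a sketch under that assumption:

    pub unsafe fn install_pagetable(pt: &Table<Level0>) {
        // derive the root table's physical address from the table itself
        let root_pt = PhysAddr::from(pt.entries as *const _ as *const usize);
        // point satp at the new root and flush the TLB, as the old code did
        riscv::register::satp::set(page_table_mode(), 0, root_pt.extract_ppn());
        riscv::asm::sfence_vma_all();
    }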

View File

@ -137,7 +137,7 @@ impl<'a> CNodeCap<'a> {
#[cfg(test)]
mod tests {
use super::*;
use crate::arch::vspace::KERNEL_ALLOCATOR;
use crate::arch::vspace::RAM_ALLOCATOR;
use crate::objects::cap::CapEntry;
use crate::objects::null::NullCap;
use core::alloc::Layout;
@ -148,7 +148,7 @@ mod tests {
let create_cnode = |radix: usize, guard_size: usize, guard: usize| {
let size = ObjectType::CNode.size(radix);
let layout = Layout::from_size_align(size, 1).unwrap();
let ptr = KERNEL_ALLOCATOR.lock().alloc(layout).unwrap();
let ptr = RAM_ALLOCATOR.lock().alloc(layout).unwrap();
let raw = CNodeCap::mint(radix, guard_size, guard, ptr);
CapEntry::new(raw)
};

View File

@ -1,5 +1,9 @@
use super::{cap::RawCap, Cap, KernelObject};
use crate::{arch::layout::mmap_phys_to_virt, objects::cap::CapEntry, vspace::*};
use crate::{
arch::layout::{mmap_phys_to_virt, PAGE_SIZE},
objects::cap::CapEntry,
vspace::*,
};
use uapi::{
cap::ObjectType,
error::{SysError, SysResult},
@ -50,8 +54,7 @@ impl<'a> FrameCap<'a> {
pub fn mint(ptr: PhysAddr, size: usize, attr: MapAttr, is_device: bool) -> RawCap {
let size_bits = size.ilog2() as usize;
debug_assert!(size_bits <= FrameCap::FRAME_SIZE_BITS);
// NOTE: we are not checking frame size
assert!(size >= PAGE_SIZE);
let arg0 = 0
| ((attr.bits() & Self::VM_RIGHT_MASK) << Self::VM_RIGHT_OFFSET)
@ -126,14 +129,10 @@ impl<'a> FrameCap<'a> {
self.as_object_mut().fill(fill.unwrap_or(0));
}
pub fn map_page<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr, attr: MapAttr) -> SysResult {
pub fn map<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr, attr: MapAttr) -> SysResult {
let masked_attr = attr & self.attr();
root.map(vaddr, self.cte.cap.get().ptr, masked_attr).map_err(|e| match e {
PageError::AlreadyMapped => SysError::AlreadyMapped,
PageError::MissingEntry => SysError::MissingEntry,
PageError::NotAligned => SysError::InvalidArgument,
})?;
root.map(vaddr, self.cte.cap.get().ptr, masked_attr)?;
self.set_mapped_asid(0);
self.set_mapped_vaddr(vaddr);
@ -147,11 +146,13 @@ impl<'a> FrameCap<'a> {
return Err(SysError::NotMapped);
}
match root.lookup_mut(self.mapped_vaddr()) {
match root.lookup_mut(vaddr) {
Some(entry) if entry.is_leaf() && entry.paddr() == self.cte.cap.get().ptr => {
entry.set_paddr(PhysAddr::default());
entry.set_attr(MapAttr::empty());
// todo: sfence.vma
self.set_mapped_asid(0);
self.set_mapped_vaddr(VirtAddr(0));
Ok(())

View File

@ -100,13 +100,8 @@ impl<'a> TableCap<'a> {
array.fill(0);
}
pub fn map_table<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr) -> SysResult {
root.map(vaddr, self.cte.cap.get().ptr, MapAttr::PAGE_TABLE)
.map_err(|e| match e {
PageError::AlreadyMapped => SysError::AlreadyMapped,
PageError::MissingEntry => SysError::MissingEntry,
PageError::NotAligned => SysError::InvalidArgument,
})?;
pub fn map<T: TableLevel>(&self, root: &mut Table<T>, vaddr: VirtAddr) -> SysResult {
root.map(vaddr, self.cte.cap.get().ptr, MapAttr::PAGE_TABLE)?;
self.set_mapped_asid(0);
self.set_mapped_vaddr(vaddr);
@ -120,11 +115,13 @@ impl<'a> TableCap<'a> {
return Err(SysError::NotMapped);
}
match root.lookup_mut(self.mapped_vaddr()) {
match root.lookup_mut(vaddr) {
Some(entry) if !entry.is_leaf() && entry.paddr() == self.cte.cap.get().ptr => {
entry.set_paddr(PhysAddr::default());
entry.set_attr(MapAttr::empty());
// todo: sfence.vma
self.set_mapped_asid(0);
self.set_mapped_vaddr(VirtAddr(0));
Ok(())

View File

@ -134,14 +134,14 @@ impl UntypedCap<'_> {
#[cfg(test)]
mod tests {
use super::*;
use crate::{arch::vspace::KERNEL_ALLOCATOR, objects::cap::CapEntry};
use crate::{arch::vspace::RAM_ALLOCATOR, objects::cap::CapEntry};
use core::alloc::Layout;
use log::info;
fn alloc_mem(size: usize) -> PhysAddr {
assert!(size.is_power_of_two());
let layout = Layout::from_size_align(size, 1).unwrap();
KERNEL_ALLOCATOR.lock().alloc(layout).unwrap()
RAM_ALLOCATOR.lock().alloc(layout).unwrap()
}
fn create_untyped_cte(size: usize, is_device: bool) -> CapEntry {

View File

@ -0,0 +1,20 @@
use uapi::error::SysError;
#[derive(Debug)]
pub enum PageError {
AlreadyMapped,
MissingEntry,
NotAligned,
}
pub type PageResult<T = ()> = Result<T, PageError>;
impl From<PageError> for SysError {
fn from(e: PageError) -> Self {
match e {
PageError::AlreadyMapped => SysError::AlreadyMapped,
PageError::MissingEntry => SysError::MissingEntry,
PageError::NotAligned => SysError::InvalidArgument,
}
}
}
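This From<PageError> for SysError impl is what lets the FrameCap and TableCap map paths drop their manual map_err chains: with it in scope, a failed mapping propagates with the ? operator alone. A minimal sketch (map_one is a hypothetical helper; the other types are from this crate):

    fn map_one<T: TableLevel>(root: &mut Table<T>, vaddr: VirtAddr, paddr: PhysAddr) -> SysResult {
        // the PageError returned by Table::map converts into SysError through the impl above
        root.map(vaddr, paddr, MapAttr::PAGE_TABLE)?;
        Ok(())
    }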

View File

@ -1,9 +1,11 @@
mod addr;
mod entry;
mod error;
mod level;
mod table;
pub use addr::*;
pub use entry::*;
pub use error::*;
pub use level::*;
pub use table::*;

View File

@ -1,16 +1,6 @@
use super::{MapAttr, TableLevel};
use core::fmt::Debug;
use super::{MapAttr, PageResult, TableLevel};
use utils::addr::{PhysAddr, VirtAddr};
#[derive(Debug)]
pub enum PageError {
AlreadyMapped,
MissingEntry,
NotAligned,
}
pub type PageResult<T = ()> = Result<T, PageError>;
pub trait TableOps<'a, T: TableLevel> {
/// # Safety
/// `location` must be a page-aligned virtual address and will not be dropped.

View File

@ -151,6 +151,17 @@ impl Entry {
}
}
impl core::fmt::Debug for Entry {
fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
f.debug_struct("Entry")
.field("filename", &self.filename())
.field("file_size", &self.file_size())
.field("is_tailer", &self.is_trailer())
.field("is_valid", &self.is_valid())
.finish()
}
}
impl<'a> Reader<'a> {
pub fn new(data: &'a [u8]) -> Self {
Reader { data }

lib/tracer/Cargo.toml (new file)
View File

@ -0,0 +1,11 @@
[package]
name = "tracer"
version = "0.1.0"
edition = "2021"
[lib]
proc-macro = true
[dependencies]
syn = { version = "2.0", features = ["full"] }
quote = "1.0"

lib/tracer/src/lib.rs (new file)
View File

@ -0,0 +1,60 @@
extern crate proc_macro;
use proc_macro::TokenStream;
use quote::quote;
use syn::{
parse::Parser, parse_macro_input, punctuated::Punctuated, Expr, ExprLit, ExprPath, ItemFn, Lit, MetaNameValue, Token,
};
#[proc_macro_attribute]
pub fn trace_callback(attr: TokenStream, item: TokenStream) -> TokenStream {
let args = Punctuated::<MetaNameValue, Token![,]>::parse_terminated.parse(attr).unwrap();
let mut debug_print = false;
let mut callback = vec![];
for arg in args {
if !arg.path.is_ident("log") && !arg.path.is_ident("callback") {
panic!("Unknown attribute: {:?}.", arg.path.get_ident());
}
if arg.path.is_ident("log") {
if let Expr::Lit(ExprLit { lit: Lit::Bool(b), .. }) = &arg.value {
debug_print = b.value;
} else {
panic!("Unsupported log option provided.");
}
} else if arg.path.is_ident("callback") {
if let Expr::Path(ExprPath { path, .. }) = arg.value {
let ident = path.get_ident().expect("Invalid callback function provided.");
callback.push(ident.clone());
} else {
panic!("Unsupported callback option provided.");
}
}
}
let input = parse_macro_input!(item as ItemFn);
let fn_name = &input.sig.ident;
let fn_block = &input.block;
let fn_vis = &input.vis;
let fn_sig = &input.sig;
let call_callback = callback.iter().map(|ident| {
quote! {
if #debug_print { log::trace!("[tracer][callback] invoking {}", stringify!(#ident)) }
#ident();
}
});
let expanded = quote! {
#fn_vis #fn_sig {
if #debug_print { log::trace!("[tracer] tracing {}", stringify!(#fn_name)) }
{ #(#call_callback)* }
if #debug_print { log::trace!("[tracer][function] invoking {}", stringify!(#fn_name)) }
(|| #fn_block)()
}
};
TokenStream::from(expanded)
}
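As applied to alloc_page in the vspace changes above, the attribute keeps the signature and wraps the body; under debug_assertions the expansion looks roughly like the sketch below (original body elided):

    #[cfg_attr(debug_assertions, tracer::trace_callback(log = true, callback = alloc_callback))]
    pub fn alloc_page() -> PhysAddr { /* original body */ }

    // expands, approximately, to:
    pub fn alloc_page() -> PhysAddr {
        if true { log::trace!("[tracer] tracing {}", stringify!(alloc_page)) }
        {
            if true { log::trace!("[tracer][callback] invoking {}", stringify!(alloc_callback)) }
            alloc_callback();
        }
        if true { log::trace!("[tracer][function] invoking {}", stringify!(alloc_page)) }
        (|| { /* original body */ })()
    }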