feat: kernel virtmem cont #189

Merged: 42 commits merged into main from jonas/feat/kernel-virtmem on Dec 29, 2024

Changes from 1 commit

Commits (42):
208e3b0
fix(wavltree): return tree lifetimes from `Cursor`
JonasKruckenberg Dec 18, 2024
c1e2e78
feat(loader): report stacks & TLS region and online hart mask
JonasKruckenberg Dec 18, 2024
f3baa84
wip
JonasKruckenberg Dec 18, 2024
00d08e2
Merge branch 'main' into jonas/feat/kernel-virtmem
JonasKruckenberg Dec 18, 2024
0f7372e
fmt & clippy
JonasKruckenberg Dec 18, 2024
2b5a77b
Update aspace.rs
JonasKruckenberg Dec 18, 2024
e1685b6
refactor: properly setup address space range constants
JonasKruckenberg Dec 19, 2024
ac64c0f
fix tests
JonasKruckenberg Dec 19, 2024
a254b3f
fmt
JonasKruckenberg Dec 19, 2024
e83c1ca
fix: fix tests by identity mapping RTC driver
JonasKruckenberg Dec 19, 2024
5d4c116
fmt
JonasKruckenberg Dec 19, 2024
5fb8160
wip
JonasKruckenberg Dec 19, 2024
1768a22
refactor: rename `pmm` to `mmu`
JonasKruckenberg Dec 19, 2024
04c2a8f
refactor(loader): report physical address offset and physical memory …
JonasKruckenberg Dec 22, 2024
d7b3564
fix(mmu): report error if Flush is dropped with unflushed changes
JonasKruckenberg Dec 22, 2024
c299420
wip
JonasKruckenberg Dec 22, 2024
e6417ea
refactor(mmu): allow any `FramesIterator` implementation
JonasKruckenberg Dec 22, 2024
f7d4abc
fix(dtb-parser): implement `Iterator` for `Strings`
JonasKruckenberg Dec 22, 2024
7fcd97b
refactor: don't explicitly map RTC device
JonasKruckenberg Dec 22, 2024
f7d2deb
chore(mmu): better printing of addresses
JonasKruckenberg Dec 22, 2024
55cbfe1
fix(wavltree): return `Pin<&mut T>` from `insert`
JonasKruckenberg Dec 22, 2024
d91e4da
fix(mmu): correct pretty printing of addresses
JonasKruckenberg Dec 25, 2024
ab95d6e
wip
JonasKruckenberg Dec 25, 2024
dfe403b
fmt & fixes
JonasKruckenberg Dec 26, 2024
5c2feec
fix: expand initial heap for elf processing
JonasKruckenberg Dec 26, 2024
d3ca827
fix startup
JonasKruckenberg Dec 26, 2024
a21a111
fix: print stack trace on panic
JonasKruckenberg Dec 26, 2024
e4c6580
feat: support short backtrace
JonasKruckenberg Dec 26, 2024
119d3bf
fix tests
JonasKruckenberg Dec 26, 2024
240e2af
don't panic in drop
JonasKruckenberg Dec 26, 2024
86a216b
fix(kernel/vm): don't panic when virt allocation range is empty
JonasKruckenberg Dec 26, 2024
e8e2ece
fmt
JonasKruckenberg Dec 26, 2024
188ea02
fix(loader): prevent frame allocator from handing out loader memory
JonasKruckenberg Dec 28, 2024
b66b360
test: add tests for `mmu` crate
JonasKruckenberg Dec 28, 2024
22d9609
fix: better address arithmetic
JonasKruckenberg Dec 28, 2024
ca9f574
fix(mmu): correct `align_down` implementation
JonasKruckenberg Dec 28, 2024
2f999ab
tests(mmu): fix tests
JonasKruckenberg Dec 28, 2024
13c85c3
fix(kernel/vm): return gap size zero instead of panic
JonasKruckenberg Dec 28, 2024
485e14b
Merge branch 'main' into jonas/feat/kernel-virtmem
JonasKruckenberg Dec 29, 2024
f53a165
enable debug assertions in release mode
JonasKruckenberg Dec 29, 2024
3d13f23
fix: increase QEMU physical memory for tests
JonasKruckenberg Dec 29, 2024
3a0c09d
clippy & fmt
JonasKruckenberg Dec 29, 2024
fix(mmu): correct align_down implementation
The previously used `align_down` implementation was incorrect: already-aligned addresses (where the offset was 0) would be moved down by a full alignment block instead of being left unchanged. This patch fixes that behaviour and makes both `align_down` and `align_up` more efficient.
JonasKruckenberg committed Dec 28, 2024
commit ca9f574954c6242a7ee3b135757c0c86b36bb9e0
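For context, the standalone sketch below (hypothetical helper names, not the crate's actual API) shows why the old offset-based rounding misbehaved on already-aligned inputs, and why the mask-based formula adopted in this commit does not:

// Old scheme: derive "align down" from the distance to the *next* aligned address.
const fn old_align_down(addr: usize, align: usize) -> usize {
    // distance up to the next multiple of `align`; 0 if `addr` is already aligned
    let up = (addr.wrapping_add(align - 1) & 0usize.wrapping_sub(align)).wrapping_sub(addr);
    // BUG: when `up` is 0 this subtracts a full `align` (the real code used checked_sub)
    addr - (align - up)
}

// New scheme: clear the low bits directly (valid because `align` is a power of two).
const fn new_align_down(addr: usize, align: usize) -> usize {
    addr & 0usize.wrapping_sub(align) // same as `addr & !(align - 1)`
}

fn main() {
    let page = 4096;
    assert_eq!(old_align_down(0x5000, page), 0x4000); // aligned input moved down a whole page
    assert_eq!(new_align_down(0x5000, page), 0x5000); // aligned input stays put
    assert_eq!(old_align_down(0x5123, page), 0x5000); // unaligned inputs were fine either way
    assert_eq!(new_align_down(0x5123, page), 0x5000);
}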
kernel/src/vm/aspace.rs (2 changes: 1 addition & 1 deletion)

@@ -193,7 +193,7 @@ impl AddressSpace {
         if flags.contains(PageFaultFlags::ACCESS) {
             return self.access_fault(virt);
         }
-        let virt = virt.checked_align_down(arch::PAGE_SIZE).unwrap();
+        let virt = virt.align_down(arch::PAGE_SIZE);

         // check if the address is within the last fault range
         // if so, we can save ourselves a tree lookup
kernel/src/vm/mod.rs (2 changes: 1 addition & 1 deletion)

@@ -218,7 +218,7 @@ fn reserve_wired_regions(
     );

     aspace.reserve(
-        virt.checked_align_down(arch::PAGE_SIZE).unwrap()
+        virt.align_down(arch::PAGE_SIZE)
             ..virt
                 .checked_add(ph.mem_size() as usize)
                 .unwrap()
libs/mmu/src/lib.rs (111 changes: 37 additions & 74 deletions)

@@ -185,62 +185,48 @@ macro_rules! address_impl {
             }

             // SAFETY: `align` has been checked to be a power of 2 above
-            let offset = unsafe { align_offset(self.0, align) };
-
-            let Some(out) = self.checked_add(offset) else {
-                return None;
-            };
-            debug_assert!(out.is_aligned_to(align));
-            Some(out)
-        }
-
-        #[must_use]
-        #[inline]
-        pub const fn checked_align_down(self, align: usize) -> Option<Self> {
-            if !align.is_power_of_two() {
-                panic!("checked_align_up: align is not a power-of-two");
+            let align_minus_one = unsafe { align.unchecked_sub(1) };
+
+            // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align)
+            if let Some(addr_plus_align) = self.0.checked_add(align_minus_one) {
+                let aligned = Self(addr_plus_align & 0usize.wrapping_sub(align));
+                debug_assert!(aligned.is_aligned_to(align));
+                debug_assert!(aligned.0 >= self.0);
+                Some(aligned)
+            } else {
+                None
             }
-
-            // SAFETY: `align` has been checked to be a power of 2 above
-            let offset = align - unsafe { align_offset(self.0, align) };
-
-            let Some(out) = self.checked_sub(offset) else {
-                return None;
-            };
-            debug_assert!(out.is_aligned_to(align));
-            Some(out)
         }

-        // #[must_use]
-        // #[inline]
-        // pub const fn saturating_align_up(self, align: usize) -> Self {
-        //     if !align.is_power_of_two() {
-        //         panic!("checked_align_up: align is not a power-of-two");
-        //     }
-        //
-        //     // SAFETY: `align` has been checked to be a power of 2 above
-        //     let offset = unsafe { align_offset(self.0, align) };
-        //
-        //     let out = self.saturating_add(offset);
-        //     debug_assert!(out.is_aligned_to(align));
-        //     out
-        // }
-        //
         // #[must_use]
         // #[inline]
-        // pub const fn saturating_align_down(self, align: usize) -> Self {
+        // pub const fn wrapping_align_up(self, align: usize) -> Self {
         //     if !align.is_power_of_two() {
         //         panic!("checked_align_up: align is not a power-of-two");
         //     }
         //
         //     // SAFETY: `align` has been checked to be a power of 2 above
-        //     let offset = align - unsafe { align_offset(self.0, align) };
+        //     let align_minus_one = unsafe { align.unchecked_sub(1) };
         //
-        //     let out = self.saturating_sub(offset);
+        //     // addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align)
+        //     let out = addr.wrapping_add(align_minus_one) & 0usize.wrapping_sub(align);
         //     debug_assert!(out.is_aligned_to(align));
        //     out
        //  }

+        #[must_use]
+        #[inline]
+        pub const fn align_down(self, align: usize) -> Self {
+            if !align.is_power_of_two() {
+                panic!("checked_align_up: align is not a power-of-two");
+            }
+
+            let aligned = Self(self.0 & 0usize.wrapping_sub(align));
+            debug_assert!(aligned.is_aligned_to(align));
+            debug_assert!(aligned.0 <= self.0);
+            aligned
+        }
+
         #[inline]
         pub const fn as_ptr(self) -> *const u8 {
             self.0 as *const u8

@@ -302,13 +288,17 @@ macro_rules! address_range_impl {
         where
             Self: Sized,
         {
-            Some(self.start.checked_align_up(align)?..self.end.checked_align_down(align)?)
+            let res = self.start.checked_align_up(align)?..self.end.align_down(align);
+            Some(res)
         }
         fn checked_align_out(self, align: usize) -> Option<Self>
         where
             Self: Sized,
         {
-            Some(self.start.checked_align_down(align)?..self.end.checked_align_up(align)?)
+            let res = self.start.align_down(align)..self.end.checked_align_up(align)?;
+            // aligning outwards can only increase the size
+            debug_assert!(res.start.0 <= res.end.0);
+            Some(res)
         }
         // fn saturating_align_in(self, align: usize) -> Self {
         //     self.start.saturating_align_up(align)..self.end.saturating_align_down(align)

@@ -415,37 +405,10 @@ impl AddressRangeExt for Range<VirtualAddress> {
     }
 }

-const unsafe fn align_offset(addr: usize, a: usize) -> usize {
-    // SAFETY: `a` is a power-of-two, therefore non-zero.
-    let a_minus_one = unsafe { a.unchecked_sub(1) };
-
-    // SPECIAL_CASE: In cases where the `a` is divisible by `STRIDE`, byte offset to align a
-    // pointer can be computed more simply through `-p (mod a)`. In the off-chance the byte
-    // offset is not a multiple of `STRIDE`, the input pointer was misaligned and no pointer
-    // offset will be able to produce a `p` aligned to the specified `a`.
-    //
-    // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
-    // like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
-    // redistributes operations around the load-bearing, but pessimizing `and` instruction
-    // sufficiently for LLVM to be able to utilize the various optimizations it knows about.
-    //
-    // LLVM handles the branch here particularly nicely. If this branch needs to be evaluated
-    // at runtime, it will produce a mask `if addr_mod_stride == 0 { 0 } else { usize::MAX }`
-    // in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
-    // computation produces.
-
-    let aligned_address = addr.wrapping_add(a_minus_one) & 0usize.wrapping_sub(a);
-    let byte_offset = aligned_address.wrapping_sub(addr);
-    // FIXME: Remove the assume after <https://github.com/llvm/llvm-project/issues/62502>
-    // SAFETY: Masking by `-a` can only affect the low bits, and thus cannot have reduced
-    // the value by more than `a-1`, so even though the intermediate values might have
-    // wrapped, the byte_offset is always in `[0, a)`.
-    unsafe {
-        core::hint::assert_unchecked(byte_offset < a);
-    }
-
-    byte_offset
-}
+static_assertions::const_assert!(VirtualAddress(0xffffffc000000000).is_aligned_to(4096));
+static_assertions::const_assert_eq!(VirtualAddress(0xffffffc0000156e8).align_down(4096).0, 0xffffffc000015000);
+static_assertions::const_assert_eq!(VirtualAddress(0xffffffc000000000).checked_align_up(4096).unwrap().0, 0xffffffc000000000);
+static_assertions::const_assert_eq!(VirtualAddress(0xffffffc0000156e8).checked_align_up(4096).unwrap().0, 0xffffffc000016000);

 #[cfg(test)]
 mod tests {
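One detail worth calling out in the new `checked_align_up` above: adding `align - 1` before masking can overflow for addresses near the top of the address space, which is why the diff goes through `checked_add` and returns `None` in that case. A rough equivalent on plain `usize` (a hypothetical free function, not the crate's API) looks like this:

const fn checked_align_up(addr: usize, align: usize) -> Option<usize> {
    assert!(align.is_power_of_two());
    // `checked_add` catches addresses so close to usize::MAX that rounding up would wrap;
    // the mask afterwards only moves the value back down, never below the original address.
    match addr.checked_add(align - 1) {
        Some(bumped) => Some(bumped & 0usize.wrapping_sub(align)),
        None => None,
    }
}

fn main() {
    assert_eq!(checked_align_up(0x5000, 4096), Some(0x5000));  // already aligned
    assert_eq!(checked_align_up(0x5001, 4096), Some(0x6000));  // rounded to the next page
    assert_eq!(checked_align_up(usize::MAX - 10, 4096), None); // would overflow
}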
loader/src/machine_info.rs (2 changes: 1 addition & 1 deletion)

@@ -161,7 +161,7 @@ impl<'dt> MachineInfo<'dt> {
         // but we can't make use of it either way
         info.memories.iter_mut().for_each(|region| {
             region.start = region.start.checked_align_up(arch::PAGE_SIZE).unwrap();
-            region.end = region.end.checked_align_down(arch::PAGE_SIZE).unwrap();
+            region.end = region.end.align_down(arch::PAGE_SIZE);
         });

         // ensure the memory regions are sorted.
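The loader change above is the "align inward" pattern: the start of each reported memory region is rounded up and the end rounded down, so only whole pages survive. A sketch with made-up region addresses (hypothetical helper, not the loader's code) shows the consequence: a region smaller than one page collapses to an empty range, which downstream code has to tolerate rather than panic on (several other commits in this PR, such as "return gap size zero instead of panic", deal with exactly that).

use core::ops::Range;

fn align_in(region: Range<usize>, align: usize) -> Range<usize> {
    let start = (region.start + align - 1) & !(align - 1); // round start up
    let end = region.end & !(align - 1);                    // round end down
    start..end
}

fn main() {
    // A region straddling page boundaries loses its partial pages at both ends.
    assert_eq!(align_in(0x8020_0800..0x8040_0800, 4096), 0x8020_1000..0x8040_0000);
    // A sub-page region shrinks to an empty (start >= end) range.
    assert!(align_in(0x8000_0100..0x8000_0f00, 4096).is_empty());
}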
loader/src/vm.rs (10 changes: 4 additions & 6 deletions)

@@ -309,13 +309,11 @@ fn handle_bss_section(
     let last_page = virt_start
         .checked_add(ph.file_size - 1)
         .unwrap()
-        .checked_align_down(ph.align)
-        .unwrap();
+        .align_down(ph.align);
     let last_frame = phys_base
         .checked_add(ph.offset + ph.file_size - 1)
         .unwrap()
-        .checked_align_down(ph.align)
-        .unwrap();
+        .align_down(ph.align);

     let new_frame = frame_alloc
         .allocate_contiguous_zeroed(

@@ -489,8 +487,8 @@ fn handle_relro_segment(
     };

     let virt_aligned = {
-        virt.start.checked_align_down(arch::PAGE_SIZE).unwrap()
-            ..virt.end.checked_align_down(arch::PAGE_SIZE).unwrap()
+        virt.start.align_down(arch::PAGE_SIZE)
+            ..virt.end.align_down(arch::PAGE_SIZE)
     };

     log::debug!("Marking RELRO segment {virt_aligned:?} as read-only");
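In the `handle_bss_section` hunk, `align_down` is applied to the address of the segment's last file-backed byte (`file_size - 1`) to find the page that is only partially backed by the ELF file; that page straddles file data and zero-initialised `.bss`, which is presumably why the loader allocates a fresh zeroed frame for it (`allocate_contiguous_zeroed`) rather than mapping the file frame directly. A small worked example with made-up values (not the loader's real numbers):

fn main() {
    let page_align: usize = 4096;        // assumed segment alignment
    let virt_start: usize = 0x8020_0000; // hypothetical segment base
    let file_size: usize = 0x1234;       // bytes backed by the ELF file

    // Page containing the last file-backed byte.
    let last_page = (virt_start + file_size - 1) & !(page_align - 1);
    assert_eq!(last_page, 0x8020_1000);

    // Using `file_size` instead of `file_size - 1` would be off by one page whenever
    // the file data ends exactly on a page boundary (e.g. file_size = 0x1000).
    let wrong = (virt_start + file_size) & !(page_align - 1);
    assert_eq!(wrong, last_page); // happens to agree for these values, but not in general
}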