Skip to content

Commit

Permalink
use local memory_addr for memory_set, update memory_set, move g…
Browse files Browse the repository at this point in the history
…eneric params of mapping structures into `MappingBackend` as associated types
  • Loading branch information
aarkegz committed Aug 20, 2024
1 parent 41b0864 commit 7e1cf44
Show file tree
Hide file tree
Showing 7 changed files with 172 additions and 120 deletions.
4 changes: 1 addition & 3 deletions memory_set/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,4 @@ repository.workspace = true
categories.workspace = true

[dependencies]
memory_addr = "0.2"
# todo: update it to local one once it gets adapted to the current version
# memory_addr = { path = "../memory_addr", version = "0.2" }
memory_addr = { path = "../memory_addr", version = "0.3.0-dev" }
8 changes: 6 additions & 2 deletions memory_set/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ type MockPageTable = [MockFlags; MAX_ADDR];
struct MockBackend;

let mut pt = [0; MAX_ADDR];
let mut memory_set = MemorySet::<MockFlags, MockPageTable, MockBackend>::new();
let mut memory_set = MemorySet::<MockBackend>::new();

// Map [0x1000..0x5000).
memory_set.map(
Expand All @@ -46,7 +46,11 @@ assert_eq!(areas[0].va_range(), va_range!(0x1000..0x2000));
assert_eq!(areas[1].va_range(), va_range!(0x4000..0x5000));

// Underlying operations to do when manipulating mappings.
impl MappingBackend<MockFlags, MockPageTable> for MockBackend {
impl MappingBackend for MockBackend {
type Addr = VirtAddr;
type Flags = MockFlags;
type PageTable = MockPageTable;

fn map(&self, start: VirtAddr, size: usize, flags: MockFlags, pt: &mut MockPageTable) -> bool {
for entry in pt.iter_mut().skip(start.as_usize()).take(size) {
if *entry != 0 {
Expand Down
109 changes: 51 additions & 58 deletions memory_set/src/area.rs
Original file line number Diff line number Diff line change
@@ -1,71 +1,52 @@
use core::fmt;
use core::marker::PhantomData;

use memory_addr::{VirtAddr, VirtAddrRange};
use memory_addr::AddrRange;

use crate::{MappingError, MappingResult};

/// Underlying operations to do when manipulating mappings within the specific
/// [`MemoryArea`].
///
/// The backend can be different for different memory areas. e.g., for linear
/// mappings, the target physical address is known when it is added to the page
/// table. For lazy mappings, an empty mapping needs to be added to the page table
/// to trigger a page fault.
pub trait MappingBackend<F: Copy, P>: Clone {
/// What to do when mapping a region within the area with the given flags.
fn map(&self, start: VirtAddr, size: usize, flags: F, page_table: &mut P) -> bool;
/// What to do when unmaping a memory region within the area.
fn unmap(&self, start: VirtAddr, size: usize, page_table: &mut P) -> bool;
/// What to do when changing access flags.
fn protect(&self, start: VirtAddr, size: usize, new_flags: F, page_table: &mut P) -> bool;
}
use crate::{MappingBackend, MappingError, MappingResult};

/// A memory area represents a continuous range of virtual memory with the same
/// flags.
///
/// The target physical memory frames are determined by [`MappingBackend`] and
/// may not be contiguous.
pub struct MemoryArea<F: Copy, P, B: MappingBackend<F, P>> {
va_range: VirtAddrRange,
flags: F,
pub struct MemoryArea<B: MappingBackend> {
va_range: AddrRange<B::Addr>,
flags: B::Flags,
backend: B,
_phantom: PhantomData<(F, P)>,
}

impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
impl<B: MappingBackend> MemoryArea<B> {
/// Creates a new memory area.
pub const fn new(start: VirtAddr, size: usize, flags: F, backend: B) -> Self {
pub fn new(start: B::Addr, size: usize, flags: B::Flags, backend: B) -> Self {
Self {
va_range: VirtAddrRange::from_start_size(start, size),
va_range: AddrRange::from_start_size(start, size),
flags,
backend,
_phantom: PhantomData,
}
}

/// Returns the virtual address range.
pub const fn va_range(&self) -> VirtAddrRange {
pub const fn va_range(&self) -> AddrRange<B::Addr> {
self.va_range
}

/// Returns the memory flags, e.g., the permission bits.
pub const fn flags(&self) -> F {
pub const fn flags(&self) -> B::Flags {
self.flags
}

/// Returns the start address of the memory area.
pub const fn start(&self) -> VirtAddr {
pub const fn start(&self) -> B::Addr {
self.va_range.start
}

/// Returns the end address of the memory area.
pub const fn end(&self) -> VirtAddr {
pub const fn end(&self) -> B::Addr {
self.va_range.end
}

/// Returns the size of the memory area.
pub const fn size(&self) -> usize {
pub fn size(&self) -> usize {
self.va_range.size()
}

Expand All @@ -75,35 +56,39 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
}
}

impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
impl<B: MappingBackend> MemoryArea<B> {
/// Changes the flags.
pub(crate) fn set_flags(&mut self, new_flags: F) {
pub(crate) fn set_flags(&mut self, new_flags: B::Flags) {
self.flags = new_flags;
}

/// Changes the end address of the memory area.
pub(crate) fn set_end(&mut self, new_end: VirtAddr) {
pub(crate) fn set_end(&mut self, new_end: B::Addr) {
self.va_range.end = new_end;
}

/// Maps the whole memory area in the page table.
pub(crate) fn map_area(&self, page_table: &mut P) -> MappingResult {
pub(crate) fn map_area(&self, page_table: &mut B::PageTable) -> MappingResult {
self.backend
.map(self.start(), self.size(), self.flags, page_table)
.then_some(())
.ok_or(MappingError::BadState)
}

/// Unmaps the whole memory area in the page table.
pub(crate) fn unmap_area(&self, page_table: &mut P) -> MappingResult {
pub(crate) fn unmap_area(&self, page_table: &mut B::PageTable) -> MappingResult {
self.backend
.unmap(self.start(), self.size(), page_table)
.then_some(())
.ok_or(MappingError::BadState)
}

/// Changes the flags in the page table.
pub(crate) fn protect_area(&mut self, new_flags: F, page_table: &mut P) -> MappingResult {
pub(crate) fn protect_area(
&mut self,
new_flags: B::Flags,
page_table: &mut B::PageTable,
) -> MappingResult {
self.backend
.protect(self.start(), self.size(), new_flags, page_table);
Ok(())
Expand All @@ -113,28 +98,37 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
///
/// The start address of the memory area is increased by `new_size`. The
/// shrunk part is unmapped.
pub(crate) fn shrink_left(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
pub(crate) fn shrink_left(
&mut self,
new_size: usize,
page_table: &mut B::PageTable,
) -> MappingResult {
let unmap_size = self.size() - new_size;
if !self.backend.unmap(self.start(), unmap_size, page_table) {
return Err(MappingError::BadState);
}
self.va_range.start += unmap_size;
self.va_range.start = (self.va_range.start.into() + unmap_size).into();
Ok(())
}

/// Shrinks the memory area at the right side.
///
/// The end address of the memory area is decreased by `new_size`. The
/// shrunk part is unmapped.
pub(crate) fn shrink_right(&mut self, new_size: usize, page_table: &mut P) -> MappingResult {
pub(crate) fn shrink_right(
&mut self,
new_size: usize,
page_table: &mut B::PageTable,
) -> MappingResult {
let unmap_size = self.size() - new_size;
if !self
.backend
.unmap(self.start() + new_size, unmap_size, page_table)
{
if !self.backend.unmap(
(self.start().into() + new_size).into(),
unmap_size,
page_table,
) {
return Err(MappingError::BadState);
}
self.va_range.end -= unmap_size;
self.va_range.end = (self.va_range.end.into() - unmap_size).into();
Ok(())
}

Expand All @@ -145,27 +139,26 @@ impl<F: Copy, P, B: MappingBackend<F, P>> MemoryArea<F, P, B> {
///
/// Returns `None` if the given position is not in the memory area, or one
/// of the parts is empty after splitting.
pub(crate) fn split(&mut self, pos: VirtAddr) -> Option<Self> {
let start = self.start();
let end = self.end();
pub(crate) fn split(&mut self, pos: B::Addr) -> Option<Self> {
let pos: usize = pos.into();

let start: usize = self.start().into();
let end: usize = self.end().into();
// todo: is it a bug when `pos == end - 1`?
if start < pos && pos < end {
let new_area = Self::new(
pos,
end.as_usize() - pos.as_usize(),
self.flags,
self.backend.clone(),
);
self.va_range.end = pos;
let new_area = Self::new(pos.into(), end - pos, self.flags, self.backend.clone());
self.va_range.end = pos.into();
Some(new_area)
} else {
None
}
}
}

impl<F, P, B: MappingBackend<F, P>> fmt::Debug for MemoryArea<F, P, B>
impl<B: MappingBackend> fmt::Debug for MemoryArea<B>
where
F: fmt::Debug + Copy,
B::Addr: fmt::Debug,
B::Flags: fmt::Debug + Copy,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MemoryArea")
Expand Down
33 changes: 33 additions & 0 deletions memory_set/src/backend.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
use memory_addr::MemoryAddr;

/// Underlying operations to do when manipulating mappings within the specific
/// [`MemoryArea`].
///
/// The backend can be different for different memory areas. e.g., for linear
/// mappings, the target physical address is known when it is added to the page
/// table. For lazy mappings, an empty mapping needs to be added to the page table
/// to trigger a page fault.
pub trait MappingBackend: Clone {
    /// The address type used by this backend (e.g., a virtual address type).
    type Addr: MemoryAddr;
    /// The flags type (e.g., permission bits) associated with a mapping.
    /// Must be `Copy` since flags are duplicated into each memory area.
    type Flags: Copy;
    /// The page table type these mapping operations act on.
    type PageTable;

    /// What to do when mapping a region within the area with the given flags.
    ///
    /// Returns `true` on success; callers treat `false` as a bad-state error.
    fn map(
        &self,
        start: Self::Addr,
        size: usize,
        flags: Self::Flags,
        page_table: &mut Self::PageTable,
    ) -> bool;
    /// What to do when unmapping a memory region within the area.
    ///
    /// Returns `true` on success; callers treat `false` as a bad-state error.
    fn unmap(&self, start: Self::Addr, size: usize, page_table: &mut Self::PageTable) -> bool;
    /// What to do when changing access flags.
    ///
    /// Returns `true` on success; callers treat `false` as a bad-state error.
    fn protect(
        &self,
        start: Self::Addr,
        size: usize,
        new_flags: Self::Flags,
        page_table: &mut Self::PageTable,
    ) -> bool;
}
4 changes: 3 additions & 1 deletion memory_set/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,14 @@
extern crate alloc;

mod area;
mod backend;
mod set;

#[cfg(test)]
mod tests;

pub use self::area::{MappingBackend, MemoryArea};
pub use self::area::MemoryArea;
pub use self::backend::MappingBackend;
pub use self::set::MemorySet;

/// Error type for memory mapping operations.
Expand Down
Loading

0 comments on commit 7e1cf44

Please sign in to comment.