From 6f3db8df3607e302301855276a0ab2b3cf70523b Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Fri, 2 Dec 2022 19:30:55 -0800 Subject: [PATCH 1/8] [no ci] WIP transitioning from old `EntryFlags` to new `PteFlags` API --- Cargo.lock | 15 +- applications/bm/src/lib.rs | 4 +- applications/loadc/src/lib.rs | 16 +- kernel/acpi/hpet/src/lib.rs | 2 +- kernel/apic/src/lib.rs | 2 +- kernel/entryflags_x86_64/Cargo.toml | 14 -- kernel/entryflags_x86_64/src/lib.rs | 195 ---------------------- kernel/framebuffer/src/lib.rs | 2 +- kernel/ioapic/src/lib.rs | 2 +- kernel/iommu/src/lib.rs | 2 +- kernel/memory/Cargo.toml | 14 +- kernel/memory/src/lib.rs | 54 +++--- kernel/memory/src/paging/mapper.rs | 64 ++++--- kernel/memory/src/paging/mod.rs | 10 +- kernel/memory/src/paging/table.rs | 4 +- kernel/memory_x86_64/Cargo.toml | 4 +- kernel/memory_x86_64/src/lib.rs | 41 +++-- kernel/nic_buffers/src/lib.rs | 2 +- kernel/nic_initialization/src/lib.rs | 2 +- kernel/nic_queues/src/lib.rs | 2 +- kernel/page_table_entry/Cargo.toml | 4 +- kernel/page_table_entry/src/lib.rs | 18 +- kernel/pte_flags/src/lib.rs | 22 ++- kernel/pte_flags/src/pte_flags_aarch64.rs | 150 ++++++++++++++++- kernel/pte_flags/src/pte_flags_x86_64.rs | 187 ++++++++++++++++++++- 25 files changed, 483 insertions(+), 349 deletions(-) delete mode 100644 kernel/entryflags_x86_64/Cargo.toml delete mode 100644 kernel/entryflags_x86_64/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 24277915be..38f16f0565 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -832,16 +832,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0" -[[package]] -name = "entryflags_x86_64" -version = "0.1.0" -dependencies = [ - "bitflags", - "multiboot2", - "static_assertions", - "xmas-elf", -] - [[package]] name = "environment" version = "0.1.0" @@ -1692,6 +1682,7 @@ dependencies = [ "no_drop", "page_allocator", "page_table_entry", + "pte_flags", "spin 0.9.0", "x86_64", "xmas-elf", @@ -1734,11 +1725,11 @@ checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" name = "memory_x86_64" version = "0.1.0" dependencies = [ - "entryflags_x86_64", "kernel_config", "log", "memory_structs", "multiboot2", + "pte_flags", "x86_64", ] @@ -2199,7 +2190,7 @@ dependencies = [ "frame_allocator", "kernel_config", "memory_structs", - "memory_x86_64", + "pte_flags", "zerocopy", ] diff --git a/applications/bm/src/lib.rs b/applications/bm/src/lib.rs index d9caddd226..acf840bcb6 100644 --- a/applications/bm/src/lib.rs +++ b/applications/bm/src/lib.rs @@ -38,7 +38,7 @@ use heapfile::HeapFile; use path::Path; use fs_node::{DirRef, FileOrDir, FileRef}; use libtest::*; -use memory::{create_mapping, EntryFlags}; +use memory::{create_mapping, PteFlags}; use getopts::Options; use mod_mgmt::crate_name_from_path; @@ -514,7 +514,7 @@ fn do_memory_map_inner(overhead_ct: u64, th: usize, nr: usize) -> Result, /// The proper flags for this segment specified by the ELF file. - flags: EntryFlags, + flags: PteFlags, /// The indices of the sections in the ELF file /// that were grouped ("mapped") into this segment by the linker. 
section_ndxs: BTreeSet,
@@ -276,11 +276,11 @@ fn parse_and_load_elf_executable<'f>(
         // debug!("Successfully split pages into {:?} and {:?}", this_ap, all_pages);
         // debug!("Adjusted segment vaddr: {:#X}, size: {:#X}, {:?}", start_vaddr, memory_size_in_bytes, this_ap.start_address());
 
-        let initial_flags = EntryFlags::from_elf_program_flags(prog_hdr.flags());
+        let initial_flags = convert_to_pte_flags(prog_hdr.flags());
         let mmi = task::with_current_task(|t| t.mmi.clone()).unwrap();
         // Must initially map the memory as writable so we can copy the segment data to it later.
         let mut mp = mmi.lock().page_table
-            .map_allocated_pages(this_ap, initial_flags | EntryFlags::WRITABLE)
+            .map_allocated_pages(this_ap, initial_flags.writable(true))
             .map_err(String::from)?;
 
         // Copy data from this section into the correct offset into our newly-mapped pages
@@ -496,11 +496,17 @@ fn overwrite_relocations(
     Ok(())
 }
 
+/// Converts the given ELF program flags into `PteFlags`.
+fn convert_to_pte_flags(prog_flags: xmas_elf::program::Flags) -> PteFlags {
+    PteFlags::new()
+        .valid(prog_flags.is_read())
+        .writable(prog_flags.is_write())
+        .executable(prog_flags.is_execute())
+}
 
 fn print_usage(opts: Options) {
     println!("{}", opts.usage(USAGE));
 }
-
 const USAGE: &'static str = "Usage: loadc [ARGS] PATH
 Loads C language ELF executables on Theseus.";
diff --git a/kernel/acpi/hpet/src/lib.rs b/kernel/acpi/hpet/src/lib.rs
index acb462868f..91e722b8e9 100644
--- a/kernel/acpi/hpet/src/lib.rs
+++ b/kernel/acpi/hpet/src/lib.rs
@@ -176,7 +176,7 @@ impl HpetAcpiTable {
         let hpet_mp = page_table.map_allocated_pages_to(
             pages,
             frames,
-            EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_CACHE | EntryFlags::NO_EXECUTE,
+            EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE,
         )?;
 
         let mut hpet = hpet_mp.into_borrowed_mut::<Hpet>(phys_addr.frame_offset())
diff --git a/kernel/apic/src/lib.rs b/kernel/apic/src/lib.rs
index da453bd9c8..cc4a6e439a 100644
--- a/kernel/apic/src/lib.rs
+++ b/kernel/apic/src/lib.rs
@@ -158,7 +158,7 @@ fn map_apic(page_table: &mut PageTable) -> Result {
         page_table,
         new_page,
         frame,
-        EntryFlags::WRITABLE | EntryFlags::NO_CACHE | EntryFlags::NO_EXECUTE,
+        EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE,
     )
 }
 }
diff --git a/kernel/entryflags_x86_64/Cargo.toml b/kernel/entryflags_x86_64/Cargo.toml
deleted file mode 100644
index bcdca1c0c5..0000000000
--- a/kernel/entryflags_x86_64/Cargo.toml
+++ /dev/null
@@ -1,14 +0,0 @@
-[package]
-authors = ["Kevin Boos ", "Wenqiu Yu "]
-name = "entryflags_x86_64"
-description = "Defines the structure of page table entry flags on x86_64"
-version = "0.1.0"
-
-[dependencies]
-static_assertions = "1.1.0"
-bitflags = "1.0.4"
-multiboot2 = "0.14.0"
-xmas-elf = { version = "0.6.2", git = "https://github.com/theseus-os/xmas-elf.git" }
-
-[lib]
-crate-type = ["rlib"]
diff --git a/kernel/entryflags_x86_64/src/lib.rs b/kernel/entryflags_x86_64/src/lib.rs
deleted file mode 100644
index 2113669d8a..0000000000
--- a/kernel/entryflags_x86_64/src/lib.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-//! This crate defines the structure of page table entry flags on x86_64.
-
-#![no_std]
-
-#[macro_use] extern crate bitflags;
-#[macro_use] extern crate static_assertions;
-extern crate multiboot2;
-extern crate xmas_elf;
-
-
-bitflags! {
-    /// Page table entry flags on the x86_64 architecture.
- /// - /// The designation of bits in each `PageTableEntry` is as such: - /// * Bits `[0:8]` (inclusive) are reserved by hardware for access flags. - /// * Bits `[9:11]` (inclusive) are available for custom OS usage. - /// * Bits `[12:51]` (inclusive) are reserved by hardware to hold the physical frame address. - /// * Bits `[52:62]` (inclusive) are available for custom OS usage. - /// * Bit `63` is reserved by hardware for access flags (noexec). - /// - pub struct EntryFlags: u64 { - /// If set, this page is currently "present" in memory. - /// If not set, this page is not in memory, e.g., not mapped, paged to disk, etc. - const PRESENT = 1 << 0; - /// If set, writes to this page are allowed. - /// If not set, this page is read-only. - const WRITABLE = 1 << 1; - /// If set, userspace (ring 3) can access this page. - /// If not set, only kernelspace (ring 0) can access this page. - const USER_ACCESSIBLE = 1 << 2; - /// If set, writes to this page go directly through the cache to memory. - const WRITE_THROUGH = 1 << 3; - /// If set, this page's content is never cached, neither for read nor writes. - const NO_CACHE = 1 << 4; - /// The hardware will set this bit when the page is accessed. - const ACCESSED = 1 << 5; - /// The hardware will set this bit when the page has been written to. - const DIRTY = 1 << 6; - /// Set this bit if this page table entry represents a "huge" page. - /// This bit may be used as follows: - /// * For a P4-level PTE, it must be not set. - /// * If set for a P3-level PTE, it means this PTE maps a 1GiB huge page. - /// * If set for a P2-level PTE, it means this PTE maps a 1MiB huge page. - /// * For a P1-level PTE, it must be not set. - const HUGE_PAGE = 1 << 7; - /// Set this bit to indicate that this page is mapped across all address spaces - /// (all root page tables) and doesn't need to be flushed out of the TLB - /// when switching to another page table. - const GLOBAL = 0 << 8; // 1 << 8; // Currently disabling GLOBAL bit. - - /// Set this bit to indicate that the frame pointed to by this page table entry - /// is owned **exclusively** by that page table entry. - /// Currently, in Theseus, we only set the `EXCLUSIVE` bit for P1-level PTEs - /// that we **know** are bijective (1-to-1 virtual-to-physical) mappings. - /// If this bit is set, the pointed frame will be safely deallocated - /// once this page table entry is unmapped. - const EXCLUSIVE = 1 << 9; - - /// Set this bit to forbid execution of the mapped page. - /// In other words, if you want the page to be executable, do NOT set this bit. - const NO_EXECUTE = 1 << 63; - } -} - -/// A mask for the bits of a page table entry that contain the physical frame address. -pub const PAGE_TABLE_ENTRY_FRAME_MASK: u64 = 0x000_FFFFFFFFFF_000; - -// Ensure that we never expose reserved bits [12:51] as part of the `EntryFlags` interface. -const_assert_eq!(EntryFlags::all().bits() & PAGE_TABLE_ENTRY_FRAME_MASK, 0); - -impl EntryFlags { - /// Returns a new, all-zero `EntryFlags` with no bits enabled. - /// - /// This is a `const` version of `Default::default`. - pub const fn zero() -> EntryFlags { - EntryFlags::from_bits_truncate(0) - } - - /// Returns `true` if the page the entry points to is a huge page. - pub const fn is_huge(&self) -> bool { - self.intersects(EntryFlags::HUGE_PAGE) - } - - /// Copies this new `EntryFlags` object and sets the huge page bit. 
- pub const fn into_huge(&self) -> EntryFlags { - EntryFlags::from_bits_truncate( - self.bits() | EntryFlags::HUGE_PAGE.bits() - ) - } - - /// Returns `true` if the page is writable. - pub const fn is_writable(&self) -> bool { - self.intersects(EntryFlags::WRITABLE) - } - - /// Copies this new `EntryFlags` object and sets the writable bit. - pub const fn into_writable(&self) -> EntryFlags { - EntryFlags::from_bits_truncate( - self.bits() | EntryFlags::WRITABLE.bits() - ) - } - - /// Returns `true` if these flags are executable. - pub const fn is_executable(&self) -> bool { - // On x86_64, this means that the `NO_EXECUTE` bit is *not* set. - !self.intersects(EntryFlags::NO_EXECUTE) - } - - /// Returns `true` if these flags are exclusive. - pub const fn is_exclusive(&self) -> bool { - self.intersects(EntryFlags::EXCLUSIVE) - } - - /// Copies this `EntryFlags` into a new one with the exclusive bit cleared. - pub const fn into_non_exclusive(&self) -> EntryFlags { - // This is a const way to write: `self | EntryFlags::WRITABLE` - EntryFlags::from_bits_truncate( - self.bits() & !EntryFlags::EXCLUSIVE.bits() - ) - } - - /// Copies this `EntryFlags` into a new one with the exclusive bit cleared. - pub const fn into_exclusive(&self) -> EntryFlags { - // This is a const way to write: `self | EntryFlags::WRITABLE` - EntryFlags::from_bits_truncate( - self.bits() | EntryFlags::EXCLUSIVE.bits() - ) - } - - /// Gets flags according to the properties of a section from multiboot2. - pub fn from_multiboot2_section_flags(section: &multiboot2::ElfSection) -> EntryFlags { - use multiboot2::ElfSectionFlags; - - let mut flags = EntryFlags::empty(); - - if section.flags().contains(ElfSectionFlags::ALLOCATED) { - // section is loaded to memory - flags |= EntryFlags::PRESENT; - } - if section.flags().contains(ElfSectionFlags::WRITABLE) { - flags |= EntryFlags::WRITABLE; - } - if !section.flags().contains(ElfSectionFlags::EXECUTABLE) { - flags |= EntryFlags::NO_EXECUTE; - } - - flags - } - - /// Gets flags according to the properties of a section from elf flags. - pub fn from_elf_section_flags(elf_flags: u64) -> EntryFlags { - use xmas_elf::sections::{SHF_ALLOC, SHF_EXECINSTR, SHF_WRITE}; - - let mut flags = EntryFlags::empty(); - - if elf_flags & SHF_ALLOC == SHF_ALLOC { - // section is loaded to memory - flags |= EntryFlags::PRESENT; - } - if elf_flags & SHF_WRITE == SHF_WRITE { - flags |= EntryFlags::WRITABLE; - } - if elf_flags & SHF_EXECINSTR == 0 { - // only mark no execute if the execute flag isn't 1 - flags |= EntryFlags::NO_EXECUTE; - } - - flags - } - - /// Gets flags according to the properties of a program. - pub fn from_elf_program_flags(prog_flags: xmas_elf::program::Flags) -> EntryFlags { - let mut flags = EntryFlags::empty(); - - if prog_flags.is_read() { - // section is loaded to memory - flags |= EntryFlags::PRESENT; - } - if prog_flags.is_write() { - flags |= EntryFlags::WRITABLE; - } - if !prog_flags.is_execute() { - // only mark no execute if the execute flag isn't 1 - flags |= EntryFlags::NO_EXECUTE; - } - - flags - } -} - -impl Default for EntryFlags { - fn default() -> Self { - Self::zero() - } -} diff --git a/kernel/framebuffer/src/lib.rs b/kernel/framebuffer/src/lib.rs index 4df2405a07..d5544cab3c 100644 --- a/kernel/framebuffer/src/lib.rs +++ b/kernel/framebuffer/src/lib.rs @@ -78,7 +78,7 @@ impl Framebuffer

{ let kernel_mmi_ref = memory::get_kernel_mmi_ref().ok_or("KERNEL_MMI was not yet initialized!")?; let vesa_display_flags: EntryFlags = - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::GLOBAL | EntryFlags::NO_CACHE; + EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::GLOBAL | EntryFlags::CACHE_DISABLE; let size = width * height * core::mem::size_of::
<P>
(); let pages = memory::allocate_pages_by_bytes(size).ok_or("could not allocate pages for a new framebuffer")?; diff --git a/kernel/ioapic/src/lib.rs b/kernel/ioapic/src/lib.rs index c4bbb77a9a..35f67583f1 100644 --- a/kernel/ioapic/src/lib.rs +++ b/kernel/ioapic/src/lib.rs @@ -76,7 +76,7 @@ impl IoApic { let ioapic_mapped_page = page_table.map_allocated_pages_to( new_page, frame, - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_CACHE | EntryFlags::NO_EXECUTE, + EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE, )?; let ioapic_regs = ioapic_mapped_page.into_borrowed_mut(0).map_err(|(_mp, err)| err)?; diff --git a/kernel/iommu/src/lib.rs b/kernel/iommu/src/lib.rs index e8a5317005..4683b947f5 100644 --- a/kernel/iommu/src/lib.rs +++ b/kernel/iommu/src/lib.rs @@ -58,7 +58,7 @@ pub fn init(host_address_width: u8, let mp = { let frames = allocate_frames_at(register_base_address, 1)?; let pages = allocate_pages(1).ok_or("Unable to find virtual page!")?; - let flags = EntryFlags::WRITABLE | EntryFlags::NO_CACHE | EntryFlags::NO_EXECUTE; + let flags = EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE; page_table.map_allocated_pages_to(pages, frames, flags)? }; diff --git a/kernel/memory/Cargo.toml b/kernel/memory/Cargo.toml index 7f64e13907..2b68982673 100644 --- a/kernel/memory/Cargo.toml +++ b/kernel/memory/Cargo.toml @@ -1,8 +1,9 @@ [package] authors = ["Kevin Boos "] name = "memory" -description = "The virtual memory subsystem." +description = "The memory management subsystem" version = "0.1.0" +edition = "2021" [dependencies] spin = "0.9.0" @@ -10,9 +11,12 @@ bitflags = "1.1.0" multiboot2 = "0.14.0" xmas-elf = { version = "0.6.2", git = "https://github.com/theseus-os/xmas-elf.git" } bit_field = "0.7.0" -x86_64 = "0.14.8" zerocopy = "0.5.0" +[target.'cfg(target_arch = "x86_64")'.dependencies] +x86_64 = "0.14.8" +memory_x86_64 = { path = "../memory_x86_64" } + [dependencies.log] version = "0.4.8" @@ -29,15 +33,15 @@ path = "../../libs/atomic_linked_list" [dependencies.kernel_config] path = "../kernel_config" -[dependencies.memory_x86_64] -path = "../memory_x86_64" - [dependencies.memory_structs] path = "../memory_structs" [dependencies.page_table_entry] path = "../page_table_entry" +[dependencies.pte_flags] +path = "../pte_flags" + [dependencies.page_allocator] path = "../page_allocator" diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs index 87819cf79f..b46f802c9d 100644 --- a/kernel/memory/src/lib.rs +++ b/kernel/memory/src/lib.rs @@ -1,33 +1,20 @@ -//! This crate implements the virtual memory subsystem for Theseus, -//! which is fairly robust and provides a unification between -//! arbitrarily mapped sections of memory and Rust's lifetime system. -//! Originally based on Phil Opp's blog_os. +//! This crate implements the main memory management subsystem for Theseus. +//! +//! The primary type of interest is [`MappedPages`], which offers a robust +//! interface that unifies the usage of arbitrary memory regions +//! with that of Rust's safe type system and lifetimes. +//! +//! ## Acknowledgments +//! Some of the internal page table management code was based on +//! Philipp Oppermann's [blog_os], but has since changed significantly. +//! +//! 
[blog_os]: https://github.com/phil-opp/blog_os #![no_std] #![feature(ptr_internals)] #![feature(unboxed_closures)] #![feature(result_option_inspect)] -extern crate spin; -extern crate multiboot2; -extern crate alloc; -#[macro_use] extern crate log; -extern crate irq_safety; -extern crate kernel_config; -extern crate atomic_linked_list; -extern crate xmas_elf; -extern crate bit_field; -#[cfg(target_arch = "x86_64")] -extern crate memory_x86_64; -extern crate x86_64; -extern crate memory_structs; -extern crate page_table_entry; -extern crate page_allocator; -extern crate frame_allocator; -extern crate zerocopy; -extern crate no_drop; - - #[cfg(not(mapper_spillful))] mod paging; @@ -43,15 +30,18 @@ pub use memory_structs::{Frame, Page, FrameRange, PageRange, VirtualAddress, Phy pub use page_allocator::{AllocatedPages, allocate_pages, allocate_pages_at, allocate_pages_by_bytes, allocate_pages_by_bytes_at}; -pub use frame_allocator::{AllocatedFrames, MemoryRegionType, PhysicalMemoryRegion, - allocate_frames_by_bytes_at, allocate_frames_by_bytes, allocate_frames_at}; +pub use frame_allocator::{ + AllocatedFrames, MemoryRegionType, PhysicalMemoryRegion, + allocate_frames_by_bytes_at, allocate_frames_by_bytes, allocate_frames_at, +}; #[cfg(target_arch = "x86_64")] -use memory_x86_64::{BootInformation, get_kernel_address, get_boot_info_mem_area, find_section_memory_bounds, - get_vga_mem_addr, get_modules_address, tlb_flush_virt_addr, tlb_flush_all, get_p4}; +use memory_x86_64::{ + BootInformation, get_kernel_address, get_boot_info_mem_area, find_section_memory_bounds, + get_vga_mem_addr, get_modules_address, tlb_flush_virt_addr, tlb_flush_all, get_p4, +}; -#[cfg(target_arch = "x86_64")] -pub use memory_x86_64::EntryFlags;// Export EntryFlags so that others does not need to get access to memory_. +pub use pte_flags::*; use spin::Once; use irq_safety::MutexIrqSafe; @@ -95,7 +85,7 @@ pub struct MemoryManagementInfo { /// # Locking / Deadlock /// Currently, this function acquires the lock on the frame allocator and the kernel's `MemoryManagementInfo` instance. /// Thus, the caller should ensure that the locks on those two variables are not held when invoking this function. -pub fn create_contiguous_mapping(size_in_bytes: usize, flags: EntryFlags) -> Result<(MappedPages, PhysicalAddress), &'static str> { +pub fn create_contiguous_mapping(size_in_bytes: usize, flags: PteFlags) -> Result<(MappedPages, PhysicalAddress), &'static str> { let kernel_mmi_ref = get_kernel_mmi_ref().ok_or("create_contiguous_mapping(): KERNEL_MMI was not yet initialized!")?; let allocated_pages = allocate_pages_by_bytes(size_in_bytes).ok_or("memory::create_contiguous_mapping(): couldn't allocate contiguous pages!")?; let allocated_frames = allocate_frames_by_bytes(size_in_bytes).ok_or("memory::create_contiguous_mapping(): couldn't allocate contiguous frames!")?; @@ -113,7 +103,7 @@ pub fn create_contiguous_mapping(size_in_bytes: usize, flags: EntryFlags) -> Res /// # Locking / Deadlock /// Currently, this function acquires the lock on the kernel's `MemoryManagementInfo` instance. /// Thus, the caller should ensure that lock is not held when invoking this function. 
-pub fn create_mapping(size_in_bytes: usize, flags: EntryFlags) -> Result<MappedPages, &'static str> {
+pub fn create_mapping(size_in_bytes: usize, flags: PteFlags) -> Result<MappedPages, &'static str> {
     let kernel_mmi_ref = get_kernel_mmi_ref().ok_or("create_contiguous_mapping(): KERNEL_MMI was not yet initialized!")?;
     let allocated_pages = allocate_pages_by_bytes(size_in_bytes).ok_or("memory::create_mapping(): couldn't allocate pages!")?;
     kernel_mmi_ref.lock().page_table.map_allocated_pages(allocated_pages, flags)
diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs
index 35f244f8f2..08a5fefd15 100644
--- a/kernel/memory/src/paging/mapper.rs
+++ b/kernel/memory/src/paging/mapper.rs
@@ -26,7 +26,7 @@ use paging::{
 };
 use spin::Once;
 use kernel_config::memory::{PAGE_SIZE, ENTRIES_PER_PAGE_TABLE};
-use super::{EntryFlags, tlb_flush_virt_addr};
+use super::{PteFlags, tlb_flush_virt_addr};
 use zerocopy::FromBytes;
 use page_table_entry::UnmapResult;
 
@@ -211,11 +211,11 @@ impl Mapper {
     /// Maps the given virtual `AllocatedPages` to the given physical `AllocatedFrames`.
     ///
     /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`.
-    pub fn map_allocated_pages_to(
+    pub fn map_allocated_pages_to<F: Into<PteFlagsArch>>(
         &mut self,
         pages: AllocatedPages,
         frames: AllocatedFrames,
-        flags: EntryFlags,
+        flags: F,
     ) -> Result<MappedPages, &'static str> {
         let (mapped_pages, frames) = self.internal_map_to(pages, frames, flags)?;
 
@@ -232,7 +232,7 @@
     /// Maps the given `AllocatedPages` to randomly chosen (allocated) physical frames.
     ///
     /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`.
-    pub fn map_allocated_pages(&mut self, pages: AllocatedPages, flags: EntryFlags)
+    pub fn map_allocated_pages<F: Into<PteFlagsArch>>(&mut self, pages: AllocatedPages, flags: F)
         -> Result<MappedPages, &'static str>
     {
         let mut top_level_flags = flags.clone() | EntryFlags::PRESENT;
@@ -286,21 +286,25 @@
     ///
     /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`.
     #[doc(hidden)]
-    pub unsafe fn map_to_non_exclusive(mapper: &mut Self, pages: AllocatedPages, frames: &AllocatedFrames, flags: EntryFlags)
-        -> Result<MappedPages, &'static str>
-    {
-        let mut top_level_flags = flags.clone() | EntryFlags::PRESENT;
-        // P4, P3, and P2 entries should never set NO_EXECUTE, only the lowest-level P1 entry should.
-        // top_level_flags.set(EntryFlags::WRITABLE, true); // is the same true for the WRITABLE bit?
-        top_level_flags.set(EntryFlags::NO_EXECUTE, false);
-        // Currently we cannot use the EXCLUSIVE bit for page table frames (P4, P3, P2),
-        // because another page table frame may re-use (create another alias for) it without us knowing here.
-        top_level_flags.set(EntryFlags::EXCLUSIVE, false);
-        // In fact, in this function, none of the frames can be mapped as exclusive
-        // because we're not accepting the `AllocatedFrames` type.
-        let mut actual_flags = flags | EntryFlags::PRESENT;
-        actual_flags.set(EntryFlags::EXCLUSIVE, false);
+    pub unsafe fn map_to_non_exclusive<F: Into<PteFlagsArch>>(
+        mapper: &mut Self,
+        pages: AllocatedPages,
+        frames: &AllocatedFrames,
+        flags: F,
+    ) -> Result<MappedPages, &'static str> {
+        let flags = flags.into();
+        let top_level_flags = flags.valid(true)
+            // P4, P3, and P2 entries should never set NOT_EXECUTABLE; only the lowest-level P1 entry should.
+            .executable(true)
+            // Currently we cannot use the EXCLUSIVE bit for page table frames (P4, P3, P2),
+            // because another page table frame may re-use (create another alias for) it without us knowing here.
+            .exclusive(false);
+        // In fact, in this function, none of the frames can be mapped as exclusive
+        // because we're accepting a *reference* to an `AllocatedFrames`, not consuming it.
+        let actual_flags = flags
+            .exclusive(false)
+            .valid(true);
 
         let pages_count = pages.size_in_pages();
         let frames_count = frames.size_in_frames();
@@ -348,8 +352,8 @@ pub struct MappedPages {
     page_table_p4: Frame,
     /// The range of allocated virtual pages contained by this mapping.
     pages: AllocatedPages,
-    // The EntryFlags that define the page permissions of this mapping
-    flags: EntryFlags,
+    // The PTE flags that define the page permissions of this mapping.
+    flags: PteFlagsArch,
 }
 impl Deref for MappedPages {
     type Target = PageRange;
@@ -365,12 +369,12 @@ impl MappedPages {
         MappedPages {
             page_table_p4: Frame::containing_address(PhysicalAddress::zero()),
             pages: AllocatedPages::empty(),
-            flags: EntryFlags::zero(),
+            flags: PteFlagsArch::new(),
         }
     }
 
     /// Returns the flags that describe this `MappedPages` page table permissions.
-    pub fn flags(&self) -> EntryFlags {
+    pub fn flags(&self) -> PteFlagsArch {
         self.flags
     }
 
@@ -472,7 +476,11 @@ impl MappedPages {
     ///
     /// Returns a new `MappedPages` object with the same in-memory contents
     /// as this object, but at a completely new memory region.
-    pub fn deep_copy(&self, new_flags: Option<EntryFlags>, active_table_mapper: &mut Mapper) -> Result<MappedPages, &'static str> {
+    pub fn deep_copy<F: Into<PteFlagsArch>>(
+        &self,
+        active_table_mapper: &mut Mapper,
+        new_flags: Option<F>,
+    ) -> Result<MappedPages, &'static str> {
         warn!("MappedPages::deep_copy() has not been adequately tested yet.");
         let size_in_pages = self.size_in_pages();
 
@@ -480,11 +488,11 @@
         let new_pages = allocate_pages(size_in_pages).ok_or_else(|| "Couldn't allocate_pages()")?;
 
         // we must temporarily map the new pages as Writable, since we're about to copy data into them
-        let new_flags = new_flags.unwrap_or(self.flags);
+        let new_flags = new_flags.map_or(self.flags, Into::into);
         let needs_remapping = !new_flags.is_writable();
         let mut new_mapped_pages = active_table_mapper.map_allocated_pages(
             new_pages,
-            new_flags | EntryFlags::WRITABLE, // force writable
+            new_flags.writable(true), // force writable
         )?;
 
         // perform the actual copy of in-memory content
@@ -508,7 +516,11 @@
     ///
     /// Note that attempting to change certain "reserved" flags will have no effect.
     /// For example, arbitrarily setting the `EXCLUSIVE` bit would cause unsafety, so it cannot be changed.
-    pub fn remap(&mut self, active_table_mapper: &mut Mapper, new_flags: EntryFlags) -> Result<(), &'static str> {
+    pub fn remap<F: Into<PteFlagsArch>>(
+        &mut self,
+        active_table_mapper: &mut Mapper,
+        new_flags: F,
+    ) -> Result<(), &'static str> {
         if self.size_in_pages() == 0 { return Ok(()); }
 
         // Use the existing value of the `EXCLUSIVE` flag rather than whatever value was passed in.
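
With this change, call sites build page permissions via chainable builder methods instead of OR-ing `EntryFlags` constants together. Below is a minimal sketch of a post-transition caller, assuming the `pub use pte_flags::*;` re-export shown earlier in `kernel/memory/src/lib.rs`; the function name, the 4096-byte size, and the availability of a `device_memory` builder on the generic `PteFlags` type (it is shown here only on the arch-specific types) are assumptions for illustration:

```rust
use memory::{allocate_pages_by_bytes, get_kernel_mmi_ref, MappedPages, PteFlags};

/// Maps one page of writable, non-cacheable, non-executable memory.
fn map_device_region() -> Result<MappedPages, &'static str> {
    // Old style: EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_CACHE | EntryFlags::NO_EXECUTE.
    // New style: pages are non-executable by default, and `map_allocated_pages`
    // sets the VALID bit itself, so only the non-default bits are enabled here.
    let flags = PteFlags::new()
        .writable(true)       // sets WRITABLE on x86_64; clears READ_ONLY on aarch64
        .device_memory(true); // sets CACHE_DISABLE on x86_64; a MAIR index on aarch64
    let pages = allocate_pages_by_bytes(4096).ok_or("couldn't allocate pages")?;
    let kernel_mmi = get_kernel_mmi_ref().ok_or("KERNEL_MMI was not yet initialized")?;
    kernel_mmi.lock().page_table.map_allocated_pages(pages, flags)
}
```

Because the mapping methods are generic over `F: Into<PteFlagsArch>`, the same caller code works whether it passes the cross-architecture `PteFlags` or an arch-specific `PteFlagsX86_64`/`PteFlagsAarch64` value.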
diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs
index d1eb6b4fb2..f7ea9d0efe 100644
--- a/kernel/memory/src/paging/mod.rs
+++ b/kernel/memory/src/paging/mod.rs
@@ -29,7 +29,7 @@ use core::{
     fmt,
 };
 use super::{Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress,
-    AllocatedPages, allocate_pages, AllocatedFrames, EntryFlags,
+    AllocatedPages, allocate_pages, AllocatedFrames, PteFlags,
     tlb_flush_all, tlb_flush_virt_addr, get_p4, find_section_memory_bounds,
     get_vga_mem_addr, KERNEL_OFFSET};
 use no_drop::NoDrop;
@@ -101,7 +101,7 @@ impl PageTable {
         let mut temporary_page = TemporaryPage::create_and_map_table_frame(page, new_p4_frame, current_page_table)?;
         temporary_page.with_table_and_frame(|table, frame| {
             table.zero();
-            table[RECURSIVE_P4_INDEX].set_entry(frame.as_allocated_frame(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            table[RECURSIVE_P4_INDEX].set_entry(frame.as_allocated_frame(), PteFlags::new_writable());
         })?;
 
         let (_temp_page, inited_new_p4_frame) = temporary_page.unmap_into_parts(current_page_table)?;
@@ -136,7 +136,7 @@ impl PageTable {
         let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?;
 
         // overwrite recursive mapping
-        self.p4_mut()[RECURSIVE_P4_INDEX].set_entry(other_table.p4_table.as_allocated_frame(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+        self.p4_mut()[RECURSIVE_P4_INDEX].set_entry(other_table.p4_table.as_allocated_frame(), PteFlags::new_writable());
         tlb_flush_all();
 
         // set mapper's target frame to reflect that future mappings will be mapped into the other_table
@@ -150,7 +150,7 @@
 
         // restore recursive mapping to original p4 table
         temporary_page.with_table_and_frame(|p4_table, frame| {
-            p4_table[RECURSIVE_P4_INDEX].set_entry(frame.as_allocated_frame(), EntryFlags::PRESENT | EntryFlags::WRITABLE);
+            p4_table[RECURSIVE_P4_INDEX].set_entry(frame.as_allocated_frame(), PteFlags::new_writable());
         })?;
         tlb_flush_all();
 
@@ -343,7 +343,7 @@ pub fn init(
     boot_info_mapped_pages = Some(mapper.map_allocated_pages_to(
         boot_info_pages,
         boot_info_frames,
-        EntryFlags::PRESENT,
+        PteFlags::new(),
     )?);
 
     debug!("identity_mapped_pages: {:?}", &identity_mapped_pages[..index]);
diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs
index d9afc764b5..b832bb2a91 100644
--- a/kernel/memory/src/paging/table.rs
+++ b/kernel/memory/src/paging/table.rs
@@ -9,7 +9,7 @@
 use super::PageTableEntry;
 use kernel_config::memory::{PAGE_SHIFT, ENTRIES_PER_PAGE_TABLE};
-use super::super::{VirtualAddress, EntryFlags};
+use super::super::{VirtualAddress, PteFlags};
 use core::ops::{Index, IndexMut};
 use core::marker::PhantomData;
 use zerocopy::FromBytes;
@@ -50,7 +50,7 @@ impl Table {
     /// if `self` is a P4-level `Table`, then this returns a P3-level `Table`,
     /// and so on for P3 -> P2 and P2 -> P1.
fn next_table_address(&self, index: usize) -> Option {
-        let entry_flags = self[index].flags();
+        let pte_flags = self[index].flags();
         if entry_flags.contains(EntryFlags::PRESENT) && !entry_flags.is_huge() {
             let table_address = self as *const _ as usize;
             let next_table_vaddr: usize = (table_address << 9) | (index << PAGE_SHIFT);
diff --git a/kernel/memory_x86_64/Cargo.toml b/kernel/memory_x86_64/Cargo.toml
index 08c0e2edba..45dadf93b0 100644
--- a/kernel/memory_x86_64/Cargo.toml
+++ b/kernel/memory_x86_64/Cargo.toml
@@ -14,8 +14,8 @@ version = "0.4.8"
 [dependencies.kernel_config]
 path = "../kernel_config"
 
-[dependencies.entryflags_x86_64]
-path = "../entryflags_x86_64"
+[dependencies.pte_flags]
+path = "../pte_flags"
 
 [dependencies.memory_structs]
 path = "../memory_structs"
diff --git a/kernel/memory_x86_64/src/lib.rs b/kernel/memory_x86_64/src/lib.rs
index 8a03148710..4974b70924 100644
--- a/kernel/memory_x86_64/src/lib.rs
+++ b/kernel/memory_x86_64/src/lib.rs
@@ -1,8 +1,7 @@
 //! This crate implements the virtual memory subsystem interfaces for Theseus on x86_64.
-//! `memory` uses this crate to get the memory layout and do other arch-specific operations on x86_64.
-//!
-//! This is the top-level arch-specific memory crate.
-//! All arch-specific definitions for memory system are exported from this crate.
+//!
+//! The `memory` crate uses this crate to obtain the multiboot2-provided memory layout
+//! of the base kernel image (nano_core), and to do other arch-specific operations on x86_64.
 
 #![no_std]
 #![feature(ptr_internals)]
@@ -12,11 +11,11 @@ extern crate multiboot2;
 #[macro_use] extern crate log;
 extern crate kernel_config;
 extern crate memory_structs;
-extern crate entryflags_x86_64;
+extern crate pte_flags;
 extern crate x86_64;
 
 pub use multiboot2::BootInformation;
-pub use entryflags_x86_64::{EntryFlags, PAGE_TABLE_ENTRY_FRAME_MASK};
+use pte_flags::PteFlags;
 
 use kernel_config::memory::KERNEL_OFFSET;
 use memory_structs::{PhysicalAddress, VirtualAddress};
@@ -99,7 +98,7 @@ pub struct SectionMemoryBounds {
     /// The ending virtual address and physical address.
     pub end: (VirtualAddress, PhysicalAddress),
     /// The page table entry flags that should be used for mapping this section.
-    pub flags: EntryFlags,
+    pub flags: PteFlags,
 }
 
 /// The address bounds and flags of the initial kernel sections that need mapping.
@@ -149,9 +148,9 @@ pub fn find_section_memory_bounds(boot_info: &BootInformation) -> Result<(Aggreg
     let mut page_table_start: Option<(VirtualAddress, PhysicalAddress)> = None;
     let mut page_table_end: Option<(VirtualAddress, PhysicalAddress)> = None;
 
-    let mut text_flags: Option<EntryFlags> = None;
-    let mut rodata_flags: Option<EntryFlags> = None;
-    let mut data_flags: Option<EntryFlags> = None;
+    let mut text_flags: Option<PteFlags> = None;
+    let mut rodata_flags: Option<PteFlags> = None;
+    let mut data_flags: Option<PteFlags> = None;
 
     let mut sections_memory_bounds: [Option<SectionMemoryBounds>; 32] = Default::default();
 
@@ -166,7 +165,7 @@
         }
         debug!("Looking at loaded section {} at {:#X}, size {:#X}", section.name(), section.start_address(), section.size());
 
-        let flags = EntryFlags::from_multiboot2_section_flags(&section) | EntryFlags::GLOBAL;
+        let flags = convert_to_pte_flags(&section);
 
         // even though the linker stipulates that the kernel sections have a higher-half virtual address,
         // they are still loaded at a lower physical address, in which phys_addr = virt_addr - KERNEL_OFFSET.
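
The `convert_to_pte_flags` helper used above is defined at the bottom of this file (next hunk); note that the replaced line also drops the old `| EntryFlags::GLOBAL`. As a hypothetical illustration (not part of the patch), for a typical `.text` section, which is ALLOCATED and EXECUTABLE but not WRITABLE, the conversion works out as follows:

```rust
// What convert_to_pte_flags(&text_section) evaluates to for a .text section:
let flags = PteFlags::new()
    .valid(true)        // ElfSectionFlags::ALLOCATED => section is loaded into memory
    .writable(false)    // no ElfSectionFlags::WRITABLE => read-only
    .executable(true);  // ElfSectionFlags::EXECUTABLE => clear the NOT_EXECUTABLE bit

// The arch-specific type (via the From impls in the pte_flags crate) can verify it:
let arch_flags = pte_flags::PteFlagsX86_64::from(flags);
assert!(arch_flags.is_valid());
assert!(!arch_flags.is_writable());
assert!(arch_flags.is_executable());
```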
@@ -338,15 +337,16 @@ pub fn find_section_memory_bounds(boot_info: &BootInformation) -> Result<(Aggreg /// Gets the physical memory occupied by vga. /// -/// Returns (start_physical_address, size, entryflags). +/// Returns (start_physical_address, size, PteFlags). pub fn get_vga_mem_addr( -) -> Result<(PhysicalAddress, usize, EntryFlags), &'static str> { +) -> Result<(PhysicalAddress, usize, PteFlags), &'static str> { const VGA_DISPLAY_PHYS_START: usize = 0xA_0000; const VGA_DISPLAY_PHYS_END: usize = 0xC_0000; let vga_size_in_bytes: usize = VGA_DISPLAY_PHYS_END - VGA_DISPLAY_PHYS_START; - let vga_display_flags = - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::GLOBAL | EntryFlags::NO_CACHE; - + let vga_display_flags = PteFlags::new() + .valid(true) + .writable(true) + .device_memory(true); // TODO: set as write-combining (WC) Ok(( PhysicalAddress::new(VGA_DISPLAY_PHYS_START).ok_or("invalid VGA starting physical address")?, vga_size_in_bytes, @@ -370,3 +370,12 @@ pub fn get_p4() -> PhysicalAddress { Cr3::read_raw().0.start_address().as_u64() as usize ) } + +/// Converts the given multiboot2 section's flags into `PteFlags`. +fn convert_to_pte_flags(section: &multiboot2::ElfSection) -> PteFlags { + use multiboot2::ElfSectionFlags; + PteFlags::new() + .valid(section.flags().contains(ElfSectionFlags::ALLOCATED)) + .writable(section.flags().contains(ElfSectionFlags::WRITABLE)) + .executable(section.flags().contains(ElfSectionFlags::EXECUTABLE)) +} diff --git a/kernel/nic_buffers/src/lib.rs b/kernel/nic_buffers/src/lib.rs index 05e27cfe20..79f72904c3 100644 --- a/kernel/nic_buffers/src/lib.rs +++ b/kernel/nic_buffers/src/lib.rs @@ -26,7 +26,7 @@ impl TransmitBuffer { pub fn new(size_in_bytes: u16) -> Result { let (mp, starting_phys_addr) = create_contiguous_mapping( size_in_bytes as usize, - EntryFlags::WRITABLE | EntryFlags::NO_CACHE | EntryFlags::NO_EXECUTE, + EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE, )?; Ok(TransmitBuffer { mp: mp, diff --git a/kernel/nic_initialization/src/lib.rs b/kernel/nic_initialization/src/lib.rs index c840c0989e..ed02e92225 100644 --- a/kernel/nic_initialization/src/lib.rs +++ b/kernel/nic_initialization/src/lib.rs @@ -25,7 +25,7 @@ use nic_queues::{RxQueueRegisters, TxQueueRegisters}; pub const NIC_MAPPING_FLAGS: EntryFlags = EntryFlags::from_bits_truncate( EntryFlags::PRESENT.bits() | EntryFlags::WRITABLE.bits() | - EntryFlags::NO_CACHE.bits() | + EntryFlags::CACHE_DISABLE.bits() | EntryFlags::NO_EXECUTE.bits() ); diff --git a/kernel/nic_queues/src/lib.rs b/kernel/nic_queues/src/lib.rs index 0629bd20b0..301ebaffa9 100644 --- a/kernel/nic_queues/src/lib.rs +++ b/kernel/nic_queues/src/lib.rs @@ -26,7 +26,7 @@ use nic_buffers::{ReceiveBuffer, ReceivedFrame, TransmitBuffer}; pub const NIC_MAPPING_FLAGS: EntryFlags = EntryFlags::from_bits_truncate( EntryFlags::PRESENT.bits() | EntryFlags::WRITABLE.bits() | - EntryFlags::NO_CACHE.bits() | + EntryFlags::CACHE_DISABLE.bits() | EntryFlags::NO_EXECUTE.bits() ); diff --git a/kernel/page_table_entry/Cargo.toml b/kernel/page_table_entry/Cargo.toml index c271e9651b..b608a7e943 100644 --- a/kernel/page_table_entry/Cargo.toml +++ b/kernel/page_table_entry/Cargo.toml @@ -18,8 +18,8 @@ path = "../memory_structs" [dependencies.frame_allocator] path = "../frame_allocator" -[target.'cfg(target_arch = "x86_64")'.dependencies] -memory_x86_64 = { path = "../memory_x86_64" } +[dependencies.pte_flags] +path = "../pte_flags" [lib] crate-type = ["rlib"] diff --git a/kernel/page_table_entry/src/lib.rs 
b/kernel/page_table_entry/src/lib.rs index 586b8eceff..c55c8cc3e1 100644 --- a/kernel/page_table_entry/src/lib.rs +++ b/kernel/page_table_entry/src/lib.rs @@ -14,8 +14,7 @@ use core::ops::Deref; use memory_structs::{Frame, FrameRange, PhysicalAddress}; -#[cfg(target_arch = "x86_64")] -use memory_x86_64::{ PAGE_TABLE_ENTRY_FRAME_MASK, EntryFlags}; +use pte_flags::{PteFlagsArch, PTE_FRAME_MASK, PteFlags}; use bit_field::BitField; use kernel_config::memory::PAGE_SHIFT; use zerocopy::FromBytes; @@ -24,7 +23,7 @@ use frame_allocator::AllocatedFrame; /// A page table entry, which is a `u64` value under the hood. /// /// It contains a the physical address of the `Frame` being mapped by this entry -/// and the access bits (encoded `EntryFlags`) that describes how it's mapped, +/// and the access bits (encoded `PteFlags`) that describes how it's mapped, /// e.g., readable, writable, no exec, etc. /// /// There isn't and shouldn't be any way to create/instantiate a new `PageTableEntry` directly. @@ -67,14 +66,14 @@ impl PageTableEntry { } /// Returns this `PageTableEntry`'s flags. - pub fn flags(&self) -> EntryFlags { - EntryFlags::from_bits_truncate(self.0) + pub fn flags(&self) -> PteFlagsArch { + PteFlagsArch::from_bits_truncate(self.0) } /// Returns the physical `Frame` pointed to (mapped by) this `PageTableEntry`. /// If this page table entry is not `PRESENT`, this returns `None`. pub fn pointed_frame(&self) -> Option { - if self.flags().intersects(EntryFlags::PRESENT) { + if self.flags().is_valid() { Some(self.frame_value()) } else { None @@ -93,15 +92,16 @@ impl PageTableEntry { /// This is the actual mapping action that informs the MMU of a new mapping. /// /// Note: this performs no checks about the current value of this page table entry. - pub fn set_entry(&mut self, frame: AllocatedFrame, flags: EntryFlags) { + pub fn set_entry(&mut self, frame: AllocatedFrame, flags: PteFlagsArch) { self.0 = (frame.start_address().value() as u64) | flags.bits(); } /// Sets the flags components of this `PageTableEntry` to `new_flags`. /// /// This does not modify the frame part of the page table entry. - pub fn set_flags(&mut self, new_flags: EntryFlags) { - self.0 = self.0 & PAGE_TABLE_ENTRY_FRAME_MASK | new_flags.bits(); + pub fn set_flags(&mut self, new_flags: PteFlagsArch) { + let sanitized_flag_bits = new_flags.bits() & !PTE_FRAME_MASK; + self.0 = (self.0 & PTE_FRAME_MASK) | sanitized_flag_bits; } pub fn value(&self) -> u64 { diff --git a/kernel/pte_flags/src/lib.rs b/kernel/pte_flags/src/lib.rs index 1e209b3b7c..954055d692 100644 --- a/kernel/pte_flags/src/lib.rs +++ b/kernel/pte_flags/src/lib.rs @@ -4,6 +4,8 @@ //! * [`PteFlags`]: the set of bit flags that apply to all architectures. //! * [`PteFlagsX86_64`] or [`PteFlagsAarch64`]: the arch-specific set of bit flags //! that apply to only the given platform. +//! * This crate also exports `PteFlagsArch`, an alias for the currently-active +//! arch-specific type above (either `PteFlagsX86_64` or `PteFlagsAarch64`). //! //! ## Type conversions //! *Notably*, you can convert to and from these architecture-specific types @@ -40,10 +42,10 @@ cfg_if!{ if #[cfg(any(target_arch = "aarch64", doc))] { }} cfg_if! 
{ if #[cfg(target_arch = "x86_64")] { - use pte_flags_x86_64::PteFlagsX86_64 as PteFlagsArch; + pub use pte_flags_x86_64::PteFlagsX86_64 as PteFlagsArch; pub use pte_flags_x86_64::PTE_FRAME_MASK; } else if #[cfg(target_arch = "aarch64")] { - use pte_flags_aarch64::PteFlagsAarch64 as PteFlagsArch; + pub use pte_flags_aarch64::PteFlagsAarch64 as PteFlagsArch; pub use pte_flags_aarch64::PTE_FRAME_MASK; }} @@ -170,7 +172,7 @@ impl Default for PteFlags { } impl PteFlags { - /// Returns a new `PteFlagsX86_64` with the default value, in which: + /// Returns a new `PteFlags` with the default value, in which: /// * `ACCESSED` is set. /// * the `NOT_EXECUTABLE` bit is set. /// @@ -188,6 +190,20 @@ impl PteFlags { ) } + /// A convenience function that returns a new `PteFlags` with only the + /// default flags and the [`PteFlags::WRITABLE`] bit set. + /// + /// This is identical to: + /// ```rust + /// PteFlags::new().writable(true) + /// ``` + pub const fn new_writable() -> Self { + Self::from_bits_truncate( + Self::new().bits + | Self::WRITABLE.bits + ) + } + /// Returns a copy of this `PteFlags` with the `VALID` bit set or cleared. /// /// * If `enable` is `true`, this PTE will be considered "present" and "valid", diff --git a/kernel/pte_flags/src/pte_flags_aarch64.rs b/kernel/pte_flags/src/pte_flags_aarch64.rs index 28154cd35a..23c02fa73d 100644 --- a/kernel/pte_flags/src/pte_flags_aarch64.rs +++ b/kernel/pte_flags/src/pte_flags_aarch64.rs @@ -7,7 +7,7 @@ use static_assertions::const_assert_eq; /// A mask for the bits of a page table entry that contain the physical frame address. pub const PTE_FRAME_MASK: u64 = 0x0000_FFFF_FFFF_F000; -// Ensure that we never expose reserved bits [12:47] as part of the ` interface. +// Ensure that we never expose reserved bits [12:47] as part of the `PteFlagsAarch64` interface. const_assert_eq!(PteFlagsAarch64::all().bits() & PTE_FRAME_MASK, 0); @@ -132,6 +132,7 @@ bitflags! { /// /// Thus, Theseus currently *always* sets this bit by default. const ACCESSED = 1 << 10; + /// * If set, this page is mapped into only one or less than all address spaces, /// or is mapped differently across different address spaces, /// and thus be flushed out of the TLB when switching address spaces (page tables). @@ -151,10 +152,12 @@ bitflags! { /// /// This is currently not used in Theseus. const _GUARDED_PAGE = 1 << 50; + /// * The hardware will set this bit when the page has been written to. /// * The OS can then clear this bit once it has acknowledged that the page was written to, /// which is primarily useful for paging/swapping to disk. const DIRTY = 1 << 51; + /// * If set, this translation table entry is part of a set that is contiguous in memory /// with adjacent entries that also have this bit set. /// * If not set, this translation table entry is not contiguous in memory @@ -186,6 +189,9 @@ bitflags! { } } +const SHAREABLE_BITS_MASK: PteFlagsAarch64 = PteFlagsAarch64::_INNER_SHAREABLE; +const MAIR_BITS_MASK: PteFlagsAarch64 = PteFlagsAarch64::_MAIR_INDEX_7; + /// See [`PteFlagsAarch64::new()`] for what bits are set by default. impl Default for PteFlagsAarch64 { fn default() -> Self { @@ -201,7 +207,7 @@ impl PteFlagsAarch64 { /// * The three bits `[2:4]` for MAIR index values. /// * The two bits `[8:9]` for shareability. 
pub const MASKED_BITS_FOR_CONVERSION: PteFlagsAarch64 = PteFlagsAarch64::from_bits_truncate( - PteFlagsAarch64::_INNER_SHAREABLE.bits | PteFlagsAarch64::_MAIR_INDEX_7.bits + SHAREABLE_BITS_MASK.bits | MAIR_BITS_MASK.bits ); /// Returns a new `PteFlagsAarch64` with the default value, in which: @@ -229,6 +235,144 @@ impl PteFlagsAarch64 { | Self::NOT_EXECUTABLE.bits ) } + + /// A convenience function that returns a new `PteFlagsAarch64` with only the + /// default flags set and the [`PteFlagsAarch64::READ_ONLY`] bit not set (meaning "writable"). + /// + /// This is identical to: + /// ```rust + /// PteFlagsAarch64::new().writable(true) + /// ``` + pub const fn new_writable() -> Self { + Self::from_bits_truncate( + Self::new().bits + & !Self::READ_ONLY.bits + ) + } + + /// Returns a copy of this `PteFlagsAarch64` with the `VALID` bit set or cleared. + /// + /// * If `enable` is `true`, this PTE will be considered "present" and "valid", + /// meaning that the mapping from this page to a physical frame is valid + /// and that the translation of a virtual address in this page should succeed. + /// * If `enable` is `false`, this PTE will be considered "invalid", + /// and any attempt to access it for translation purposes will cause a page fault. + #[must_use] + #[doc(alias("present"))] + pub fn valid(mut self, enable: bool) -> Self { + self.set(Self::VALID, enable); + self + } + + /// Returns a copy of this `PteFlagsAarch64` with the `WRITABLE` bit set or cleared. + /// + /// * If `enable` is `true`, this will be writable. + /// * If `enable` is `false`, this will be read-only. + #[must_use] + #[doc(alias("read_only"))] + pub fn writable(mut self, enable: bool) -> Self { + self.set(Self::READ_ONLY, !enable); + self + } + + /// Returns a copy of this `PteFlagsAarch64` with the `NOT_EXECUTABLE` bit cleared or set. + /// + /// * If `enable` is `true`, this page will be executable (`NOT_EXECUTABLE` will be cleared). + /// * If `enable` is `false`, this page will be non-executable, which is the default + /// (`NOT_EXECUTABLE` will be set). + #[must_use] + #[doc(alias("no_exec"))] + pub fn executable(mut self, enable: bool) -> Self { + self.set(Self::NOT_EXECUTABLE, !enable); + self + } + + /// Returns a copy of this `PteFlagsAarch64` with the `DEVICE_MEMORY` bit set or cleared. + /// + /// * If `enable` is `true`, this will be non-cacheable device memory. + /// * If `enable` is `false`, this will be "normal" memory, the default. + #[must_use] + #[doc(alias("cache", "cacheable", "non-cacheable"))] + pub fn device_memory(mut self, enable: bool) -> Self { + self.remove(PteFlagsAarch64::_MAIR_INDEX_7); + if enable { + self.insert(PteFlagsAarch64::DEVICE_MEMORY); + } else { + self.insert(PteFlagsAarch64::NORMAL_MEMORY); + } + self + } + + /// Returns a copy of this `PteFlagsAarch64` with the `EXCLUSIVE` bit set or cleared. + /// + /// * If `enable` is `true`, this page will exclusively map its frame. + /// * If `enable` is `false`, this page will NOT exclusively map its frame. + #[must_use] + pub fn exclusive(mut self, enable: bool) -> Self { + self.set(Self::EXCLUSIVE, enable); + self + } + + /// Returns a copy of this `PteFlagsAarch64` with the `ACCESSED` bit set or cleared. + /// + /// Typically this is used to clear the `ACCESSED` bit, in order to indicate + /// that the OS has "acknowledged" the fact that this page was accessed + /// since the last time it checked. + /// + /// * If `enable` is `true`, this page will be marked as accessed. 
+    /// * If `enable` is `false`, this page will be marked as not accessed.
+    #[must_use]
+    pub fn accessed(mut self, enable: bool) -> Self {
+        self.set(Self::ACCESSED, enable);
+        self
+    }
+
+    /// Returns a copy of this `PteFlagsAarch64` with the `DIRTY` bit set or cleared.
+    ///
+    /// Typically this is used to clear the `DIRTY` bit, in order to indicate
+    /// that the OS has "acknowledged" the fact that this page was written to
+    /// since the last time it checked.
+    /// This bit is typically set by the hardware.
+    ///
+    /// * If `enable` is `true`, this page will be marked as dirty.
+    /// * If `enable` is `false`, this page will be marked as clean.
+    #[must_use]
+    pub fn dirty(mut self, enable: bool) -> Self {
+        self.set(Self::DIRTY, enable);
+        self
+    }
+
+    #[doc(alias("present"))]
+    pub fn is_valid(&self) -> bool {
+        self.contains(Self::VALID)
+    }
+
+    #[doc(alias("read_only"))]
+    pub fn is_writable(&self) -> bool {
+        !self.contains(Self::READ_ONLY)
+    }
+
+    #[doc(alias("no_exec"))]
+    pub fn is_executable(&self) -> bool {
+        !self.contains(Self::NOT_EXECUTABLE)
+    }
+
+    #[doc(alias("cache", "cacheable", "non-cacheable"))]
+    pub fn is_device_memory(&self) -> bool {
+        self.contains(Self::DEVICE_MEMORY)
+    }
+
+    pub fn is_dirty(&self) -> bool {
+        self.contains(Self::DIRTY)
+    }
+
+    pub fn is_accessed(&self) -> bool {
+        self.contains(Self::ACCESSED)
+    }
+
+    pub fn is_exclusive(&self) -> bool {
+        self.contains(Self::EXCLUSIVE)
+    }
 }
 
 impl From<PteFlags> for PteFlagsAarch64 {
@@ -262,7 +406,7 @@ impl From<PteFlagsAarch64> for PteFlags {
         // Otherwise, `DEVICE_MEMORY` may accidentally be misinterpreted as enabled
         // if another MAIR index that had overlapping bits (bit 2) was specified,
         // e.g., _MAIR_INDEX_3, _MAIR_INDEX_5, or _MAIR_INDEX_7.
-        if specific & PteFlagsAarch64::_MAIR_INDEX_7 == PteFlagsAarch64::DEVICE_MEMORY {
+        if specific & MAIR_BITS_MASK == PteFlagsAarch64::DEVICE_MEMORY {
             general |= Self::DEVICE_MEMORY;
         }
         general
diff --git a/kernel/pte_flags/src/pte_flags_x86_64.rs b/kernel/pte_flags/src/pte_flags_x86_64.rs
index 8e4a9a7ecf..812006ccbf 100644
--- a/kernel/pte_flags/src/pte_flags_x86_64.rs
+++ b/kernel/pte_flags/src/pte_flags_x86_64.rs
@@ -7,7 +7,7 @@ use static_assertions::const_assert_eq;
 /// A mask for the bits of a page table entry that contain the physical frame address.
 pub const PTE_FRAME_MASK: u64 = 0x000_FFFFFFFFFF_000;
 
-// Ensure that we never expose reserved bits [12:51] as part of the ` interface.
+// Ensure that we never expose reserved bits [12:51] as part of the `PteFlagsX86_64` interface.
 const_assert_eq!(PteFlagsX86_64::all().bits() & PTE_FRAME_MASK, 0);
 
 
@@ -30,39 +30,66 @@ bitflags! {
     /// * The page has been temporarily paged/swapped to disk
     /// * The page is waiting to be mapped, i.e., for demand paging.
     const VALID = 1 << 0;
+
     /// * If set, this page is writable.
     /// * If not set, this page is read-only.
     const WRITABLE = 1 << 1;
+
     /// * If set, userspace (ring 3) can access this page.
     /// * If not set, only kernelspace (ring 0) can access this page.
     ///
     /// This is unused in Theseus because it is a single privilege level OS.
-    const _USER_ACCESSIBLE = 1 << 2;
+    const _USER_ACCESSIBLE = 1 << 2;
+
     /// * If set, writes to this page go directly to memory.
     /// * If not set, writes are first written to the CPU cache, and then written to memory.
     ///   This is also known as "write-back".
-    const _WRITE_THROUGH = 1 << 3;
+    ///
+    /// If the Page Attribute Table (PAT) feature is enabled, this represents
+    /// the least-significant bit of the 3-bit index into the Page Attribute Table;
+    /// that index is used to determine the PAT entry that holds the
+    /// memory caching type that is applied to this page.
+    const _WRITE_THROUGH = 1 << 3;
+
     /// * If set, this page's content is never cached, neither for read nor writes.
     /// * If not set, this page's content is cached as normal, for both reads and writes.
-    const NO_CACHE = 1 << 4;
-    /// An alias for [`Self::NO_CACHE`] in order to ease compatibility with aarch64.
-    const DEVICE_MEMORY = Self::NO_CACHE.bits;
+    ///
+    /// If the Page Attribute Table (PAT) feature is enabled, this represents
+    /// the middle bit of the 3-bit index into the Page Attribute Table;
+    /// that index is used to determine the PAT entry that holds the
+    /// memory caching type that is applied to this page.
+    const CACHE_DISABLE = 1 << 4;
+    /// An alias for [`Self::CACHE_DISABLE`] in order to ease compatibility with aarch64.
+    const DEVICE_MEMORY = Self::CACHE_DISABLE.bits;
+
     /// * The hardware will set this bit when the page is accessed.
     /// * The OS can then clear this bit once it has acknowledged that the page was accessed,
     ///   if it cares at all about this information.
     const ACCESSED = 1 << 5;
+
     /// * The hardware will set this bit when the page has been written to.
     /// * The OS can then clear this bit once it has acknowledged that the page was written to,
     ///   which is primarily useful for paging/swapping to disk.
     const DIRTY = 1 << 6;
+
     /// * If set, this page table entry represents a "huge" page.
     ///   This bit may be used as follows:
     ///   * For a P4-level PTE, it must be not set.
     ///   * If set for a P3-level PTE, it means this PTE maps a 1GiB huge page.
     ///   * If set for a P2-level PTE, it means this PTE maps a 2MiB huge page.
-    ///   * For a P1-level PTE, it must be not set.
+    ///   * A P1-level PTE cannot map a huge page, so this bit is interpreted
+    ///     as [`Self::PAT_FOR_P1`] instead.
     /// * If not set, this is a normal 4KiB page mapping.
     const HUGE_PAGE = 1 << 7;
+
+    /// (For P1-level (lowest level) page tables ONLY):
+    /// If the Page Attribute Table (PAT) feature is enabled, this represents
+    /// the most-significant bit of the 3-bit index into the Page Attribute Table;
+    /// that index is used to determine the PAT entry that holds the
+    /// memory caching type that is applied to this page.
+    ///
+    /// This *cannot* be used for PAT index bits in a mid-level (P2 or P3) entry.
+    const PAT_FOR_P1 = 1 << 7;
+
     /// * If set, this page is mapped identically across all address spaces
     ///   (all root page tables) and doesn't need to be flushed out of the TLB
    ///   when switching to another address space (page table).
@@ -71,7 +98,18 @@
     /// and thus be flushed out of the TLB when switching address spaces (page tables).
     ///
     /// Note: Theseus is a single address space system, so this flag makes no difference.
-    const _GLOBAL = 1 << 8;
+    const _GLOBAL = 1 << 8;
+
+    // Note: Theseus currently only supports setting PAT bits for P1-level PTEs.
+    //
+    // /// (For P2- and P3- level (mid-level) page tables ONLY):
+    // /// If the Page Attribute Table (PAT) feature is enabled, this represents
+    // /// the most-significant bit of the 3-bit index into the Page Attribute Table;
+    // /// that index is used to determine the PAT entry that holds the
+    // /// memory caching type that is applied to this page.
+ // /// + // /// This *cannot* be used for PAT index bits in a lowest-level (P1) PTE. + // const PAT_FOR_P2_P3 = 1 << 12; /// See [PteFlags::EXCLUSIVE]. /// We use bit 55 because it is available for custom OS usage on both x86_64 and aarch64. @@ -96,6 +134,139 @@ impl PteFlagsX86_64 { pub const fn new() -> Self { Self::NOT_EXECUTABLE } + + /// A convenience function that returns a new `PteFlagsX86_64` with only the + /// default flags and the [`PteFlagsX86_64::WRITABLE`] bit set. + /// + /// This is identical to: + /// ```rust + /// PteFlagsX86_64::new().writable(true) + /// ``` + pub const fn new_writable() -> Self { + Self::from_bits_truncate( + Self::new().bits + | Self::WRITABLE.bits + ) + } + + /// Returns a copy of this `PteFlagsX86_64` with the `VALID` bit set or cleared. + /// + /// * If `enable` is `true`, this PTE will be considered "present" and "valid", + /// meaning that the mapping from this page to a physical frame is valid + /// and that the translation of a virtual address in this page should succeed. + /// * If `enable` is `false`, this PTE will be considered "invalid", + /// and any attempt to access it for translation purposes will cause a page fault. + #[must_use] + #[doc(alias("present"))] + pub fn valid(mut self, enable: bool) -> Self { + self.set(Self::VALID, enable); + self + } + + /// Returns a copy of this `PteFlagsX86_64` with the `WRITABLE` bit set or cleared. + /// + /// * If `enable` is `true`, this will be writable. + /// * If `enable` is `false`, this will be read-only. + #[must_use] + #[doc(alias("read_only"))] + pub fn writable(mut self, enable: bool) -> Self { + self.set(Self::WRITABLE, enable); + self + } + + /// Returns a copy of this `PteFlagsX86_64` with the `NOT_EXECUTABLE` bit cleared or set. + /// + /// * If `enable` is `true`, this page will be executable (`NOT_EXECUTABLE` will be cleared). + /// * If `enable` is `false`, this page will be non-executable, which is the default + /// (`NOT_EXECUTABLE` will be set). + #[must_use] + #[doc(alias("no_exec"))] + pub fn executable(mut self, enable: bool) -> Self { + self.set(Self::NOT_EXECUTABLE, !enable); + self + } + + /// Returns a copy of this `PteFlagsX86_64` with the `DEVICE_MEMORY` bit set or cleared. + /// + /// * If `enable` is `true`, this will be non-cacheable device memory. + /// * If `enable` is `false`, this will be "normal" memory, the default. + #[must_use] + #[doc(alias("cache", "cacheable", "non-cacheable"))] + pub fn device_memory(mut self, enable: bool) -> Self { + self.set(Self::DEVICE_MEMORY, enable); + self + } + + /// Returns a copy of this `PteFlagsX86_64` with the `EXCLUSIVE` bit set or cleared. + /// + /// * If `enable` is `true`, this page will exclusively map its frame. + /// * If `enable` is `false`, this page will NOT exclusively map its frame. + #[must_use] + pub fn exclusive(mut self, enable: bool) -> Self { + self.set(Self::EXCLUSIVE, enable); + self + } + + /// Returns a copy of this `PteFlagsX86_64` with the `ACCESSED` bit set or cleared. + /// + /// Typically this is used to clear the `ACCESSED` bit, in order to indicate + /// that the OS has "acknowledged" the fact that this page was accessed + /// since the last time it checked. + /// + /// * If `enable` is `true`, this page will be marked as accessed. + /// * If `enable` is `false`, this page will be marked as not accessed. 
#[must_use]
+    pub fn accessed(mut self, enable: bool) -> Self {
+        self.set(Self::ACCESSED, enable);
+        self
+    }
+
+    /// Returns a copy of this `PteFlagsX86_64` with the `DIRTY` bit set or cleared.
+    ///
+    /// Typically this is used to clear the `DIRTY` bit, in order to indicate
+    /// that the OS has "acknowledged" the fact that this page was written to
+    /// since the last time it checked.
+    /// This bit is typically set by the hardware.
+    ///
+    /// * If `enable` is `true`, this page will be marked as dirty.
+    /// * If `enable` is `false`, this page will be marked as clean.
+    #[must_use]
+    pub fn dirty(mut self, enable: bool) -> Self {
+        self.set(Self::DIRTY, enable);
+        self
+    }
+
+    #[doc(alias("present"))]
+    pub fn is_valid(&self) -> bool {
+        self.contains(Self::VALID)
+    }
+
+    #[doc(alias("read_only"))]
+    pub fn is_writable(&self) -> bool {
+        self.contains(Self::WRITABLE)
+    }
+
+    #[doc(alias("no_exec"))]
+    pub fn is_executable(&self) -> bool {
+        !self.contains(Self::NOT_EXECUTABLE)
+    }
+
+    #[doc(alias("cache", "cacheable", "non-cacheable"))]
+    pub fn is_device_memory(&self) -> bool {
+        self.contains(Self::DEVICE_MEMORY)
+    }
+
+    pub fn is_dirty(&self) -> bool {
+        self.contains(Self::DIRTY)
+    }
+
+    pub fn is_accessed(&self) -> bool {
+        self.contains(Self::ACCESSED)
+    }
+
+    pub fn is_exclusive(&self) -> bool {
+        self.contains(Self::EXCLUSIVE)
+    }
 }
 
 impl From<PteFlags> for PteFlagsX86_64 {

From ae0479da8abc16fa386ca6737abe520dd7c62b8c Mon Sep 17 00:00:00 2001
From: Kevin Boos 
Date: Sat, 3 Dec 2022 02:08:31 -0800
Subject: [PATCH 2/8] [no ci] WIP `memory` crates build

---
 Cargo.lock                                 |   5 +
 kernel/memory/Cargo.toml                   |   4 +
 kernel/memory/src/lib.rs                   |   3 +
 kernel/memory/src/paging/mapper.rs         | 156 ++++++++++-----------
 kernel/memory/src/paging/mod.rs            |  17 ++-
 kernel/memory/src/paging/table.rs          |  16 ++-
 kernel/memory/src/paging/temporary_page.rs |   7 +-
 kernel/page_table_entry/src/lib.rs         |   6 +-
 kernel/pte_flags/src/lib.rs                |  14 --
 kernel/pte_flags/src/pte_flags_aarch64.rs  |  14 --
 kernel/pte_flags/src/pte_flags_x86_64.rs   |  18 +--
 libs/owned_borrowed_trait/Cargo.lock       |   7 +
 libs/owned_borrowed_trait/Cargo.toml       |   8 ++
 libs/owned_borrowed_trait/src/lib.rs       |  87 ++++++++++++
 14 files changed, 223 insertions(+), 139 deletions(-)
 create mode 100644 libs/owned_borrowed_trait/Cargo.lock
 create mode 100644 libs/owned_borrowed_trait/Cargo.toml
 create mode 100644 libs/owned_borrowed_trait/src/lib.rs

diff --git a/Cargo.lock b/Cargo.lock
index 38f16f0565..f6e6b5e135 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1680,6 +1680,7 @@ dependencies = [
  "memory_x86_64",
  "multiboot2",
  "no_drop",
+ "owned_borrowed_trait",
  "page_allocator",
  "page_table_entry",
  "pte_flags",
@@ -2161,6 +2162,10 @@ dependencies = [
  "task",
 ]
 
+[[package]]
+name = "owned_borrowed_trait"
+version = "0.1.0"
+
 [[package]]
 name = "owning_ref"
 version = "0.4.1"
diff --git a/kernel/memory/Cargo.toml b/kernel/memory/Cargo.toml
index 2b68982673..d6f4531665 100644
--- a/kernel/memory/Cargo.toml
+++ b/kernel/memory/Cargo.toml
@@ -51,5 +51,9 @@ path = "../frame_allocator"
 [dependencies.no_drop]
 path = "../no_drop"
 
+[dependencies.owned_borrowed_trait]
+path = "../../libs/owned_borrowed_trait"
+
+
 [lib]
 crate-type = ["rlib"]
diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs
index b46f802c9d..a3b37fefc7 100644
--- a/kernel/memory/src/lib.rs
+++ b/kernel/memory/src/lib.rs
@@ -15,6 +15,8 @@
 #![feature(unboxed_closures)]
 #![feature(result_option_inspect)]
 
+extern crate alloc;
+
 #[cfg(not(mapper_spillful))]
 mod paging;
 
@@ -43,6 +45,7 @@ use memory_x86_64::{
 pub use
pte_flags::*; +use log::debug; use spin::Once; use irq_safety::MutexIrqSafe; use alloc::vec::Vec; diff --git a/kernel/memory/src/paging/mapper.rs b/kernel/memory/src/paging/mapper.rs index 08a5fefd15..ba742d3d68 100644 --- a/kernel/memory/src/paging/mapper.rs +++ b/kernel/memory/src/paging/mapper.rs @@ -18,17 +18,20 @@ use core::{ ptr::{NonNull, Unique}, slice, }; -use {BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, FrameRange, AllocatedPages, AllocatedFrames}; -use paging::{ +use log::{error, warn, debug, trace}; +use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, FrameRange, AllocatedPages, AllocatedFrames}; +use crate::paging::{ get_current_p4, PageRange, table::{P4, Table, Level4}, }; +use pte_flags::PteFlagsArch; use spin::Once; use kernel_config::memory::{PAGE_SIZE, ENTRIES_PER_PAGE_TABLE}; -use super::{PteFlags, tlb_flush_virt_addr}; +use super::tlb_flush_virt_addr; use zerocopy::FromBytes; use page_table_entry::UnmapResult; +use owned_borrowed_trait::{OwnedOrBorrowed, Owned, Borrowed}; /// This is a private callback used to convert `UnmappedFrames` into `AllocatedFrames`. /// @@ -76,7 +79,7 @@ impl Mapper { } /// Dumps all page table entries at all four page table levels for the given `VirtualAddress`, - /// and also shows their `EntryFlags`. + /// and also shows their `PteFlags`. /// /// The page table details are written to the the given `writer`. pub fn dump_pte(&self, writer: &mut W, virtual_address: VirtualAddress) -> fmt::Result { @@ -116,6 +119,7 @@ impl Mapper { pub fn translate_page(&self, page: Page) -> Option { let p3 = self.p4().next_table(page.p4_index()); + #[cfg(target_arch = "x86_64")] let huge_page = || { p3.and_then(|p3| { let p3_entry = &p3[page.p3_index()]; @@ -145,6 +149,8 @@ impl Mapper { None }) }; + #[cfg(target_arch = "aarch64")] + let huge_page = || { todo!("huge page (block descriptor) translation for aarch64") }; p3.and_then(|p3| p3.next_table(page.p3_index())) .and_then(|p2| p2.next_table(page.p2_index())) @@ -158,33 +164,42 @@ impl Mapper { /// /// Returns a tuple of the new `MappedPages` object containing the allocated `pages` /// and the allocated `frames` object. - pub(super) fn internal_map_to( + pub(super) fn internal_map_to( &mut self, pages: AllocatedPages, - frames: AllocatedFrames, - flags: EntryFlags, - ) -> Result<(MappedPages, AllocatedFrames), &'static str> { - let mut top_level_flags = flags.clone() | EntryFlags::PRESENT; - // P4, P3, and P2 entries should never set NO_EXECUTE, only the lowest-level P1 entry should. - // top_level_flags.set(EntryFlags::WRITABLE, true); // is the same true for the WRITABLE bit? - top_level_flags.set(EntryFlags::NO_EXECUTE, false); - // Currently we cannot use the EXCLUSIVE bit for page table frames (P4, P3, P2), - // because another page table frame may re-use (create another alias for) it without us knowing here. - // Only the lowest-level P1 entry can be considered exclusive, only if it's mapped truly exclusively using this function. - top_level_flags.set(EntryFlags::EXCLUSIVE, false); - let actual_flags = flags | EntryFlags::EXCLUSIVE | EntryFlags::PRESENT; + frames: Frames, + flags: Flags, + ) -> Result<(MappedPages, Frames::Inner), &'static str> + where + Frames: OwnedOrBorrowed, + Flags: Into, + { + let frames = frames.into_inner(); + let flags = flags.into(); + let top_level_flags = flags.valid(true) + // P4, P3, and P2 entries should never set NOT_EXECUTABLE; only the lowest-level P1 entry should. 
+ .executable(true)
+ // Currently we cannot use the EXCLUSIVE bit for page table frames (P4, P3, P2),
+ // because another page table frame may re-use (create another alias for) it without us knowing here.
+ .exclusive(false);
+
+ // Only the lowest-level P1 entry can be considered exclusive, and only when
+ // we are mapping it exclusively (i.e., owned `AllocatedFrames` are passed in).
+ let actual_flags = flags
+ .valid(true)
+ .exclusive(Frames::OWNED);

 let pages_count = pages.size_in_pages();
- let frames_count = frames.size_in_frames();
+ let frames_count = frames.borrow().size_in_frames();
 if pages_count != frames_count {
 error!("map_allocated_pages_to(): pages {:?} count {} must equal frames {:?} count {}!",
- pages, pages_count, frames, frames_count
+ pages, pages_count, frames.borrow(), frames_count
 );
 return Err("map_allocated_pages_to(): page count must equal frame count");
 }

 // iterate over pages and frames in lockstep
- for (page, frame) in pages.deref().clone().into_iter().zip(frames.into_iter()) {
+ for (page, frame) in pages.deref().clone().into_iter().zip(frames.borrow().into_iter()) {
 let p3 = self.p4_mut().next_table_create(page.p4_index(), top_level_flags);
 let p2 = p3.next_table_create(page.p3_index(), top_level_flags);
 let p1 = p2.next_table_create(page.p2_index(), top_level_flags);
@@ -217,7 +232,7 @@ impl Mapper {
 frames: AllocatedFrames,
 flags: F,
 ) -> Result<MappedPages, &'static str> {
- let (mapped_pages, frames) = self.internal_map_to(pages, frames, flags)?;
+ let (mapped_pages, frames) = self.internal_map_to(pages, Owned(frames), flags)?;

 // Currently we forget the actual `AllocatedFrames` object because
 // there is no easy/efficient way to store a dynamic list of non-contiguous frames (would require Vec).
@@ -232,18 +247,25 @@ impl Mapper {
 /// Maps the given `AllocatedPages` to randomly chosen (allocated) physical frames.
 ///
 /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`.
- pub fn map_allocated_pages<F: Into<EntryFlags>>(&mut self, pages: AllocatedPages, flags: F)
- -> Result<MappedPages, &'static str>
- {
- let mut top_level_flags = flags.clone() | EntryFlags::PRESENT;
- // P4, P3, and P2 entries should never set NO_EXECUTE, only the lowest-level P1 entry should.
- // top_level_flags.set(EntryFlags::WRITABLE, true); // is the same true for the WRITABLE bit?
- top_level_flags.set(EntryFlags::NO_EXECUTE, false);
- // Currently we cannot use the EXCLUSIVE bit for page table frames (P4, P3, P2),
- // because another page table frame may re-use (create another alias for) it without us knowing here.
- // Only the lowest-level P1 entry can be considered exclusive, only if it's mapped truly exclusively using this function.
- top_level_flags.set(EntryFlags::EXCLUSIVE, false);
- let actual_flags = flags | EntryFlags::EXCLUSIVE | EntryFlags::PRESENT;
+ pub fn map_allocated_pages<F: Into<PteFlagsArch>>(
+ &mut self,
+ pages: AllocatedPages,
+ flags: F,
+ ) -> Result<MappedPages, &'static str> {
+ let flags = flags.into();
+ let top_level_flags = flags
+ // P4, P3, and P2 entries should never set NOT_EXECUTABLE; only the lowest-level P1 entry should.
+ .executable(true)
+ // Currently we cannot use the EXCLUSIVE bit for page table frames (P4, P3, P2),
+ // because another page table frame may re-use (create another alias for) it without us knowing here.
+ .exclusive(false)
+ .valid(true);
+
+ // Only the lowest-level P1 entry can be considered exclusive, and only when
+ // we are mapping it exclusively (i.e., owned `AllocatedFrames` are passed in).
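The split between `top_level_flags` and `actual_flags` in these functions is the heart of the refactor: on x86_64, the effective permissions of a translation are essentially the intersection of the flags at every level of the walk, so P4/P3/P2 entries must stay maximally permissive while only the leaf P1 entry encodes the real policy. A condensed sketch of that derivation, assuming the `PteFlagsArch` builder API from this series (`derive_table_flags` is an invented name; the mapper above inlines this logic rather than calling a helper):

```rust
use pte_flags::PteFlagsArch;

/// Derives the flags for intermediate (P4/P3/P2) page-table entries
/// from the flags requested for the final P1 mapping.
fn derive_table_flags(requested: PteFlagsArch) -> PteFlagsArch {
    requested
        // Parent entries must be present for the walk to continue.
        .valid(true)
        // Never set NOT_EXECUTABLE above P1, or no page beneath this
        // table could ever be executable.
        .executable(true)
        // A page-table frame may be aliased by another page table,
        // so it can never be marked EXCLUSIVE.
        .exclusive(false)
}
```

Writability of table entries is handled separately: `next_table_create()` (later in this patch) forces newly created table entries to be valid and writable, as x86_64 requires.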
+ let actual_flags = flags + .exclusive(true) + .valid(true); for page in pages.deref().clone() { let af = frame_allocator::allocate_frames(1).ok_or("map_allocated_pages(): couldn't allocate new frame, out of memory")?; @@ -282,9 +304,12 @@ impl Mapper { /// in which only one virtual page can map to a given physical frame, /// which preserves Rust's knowledge of language-level aliasing and thus its safety checks. /// - /// As such, the pages mapped here will be marked as non-`EXCLUSIVE`, regardless of the `flags` passed in. + /// As such, the pages mapped here will be marked as non-exclusive, + /// regardless of the `flags` passed in. + /// This means that the `frames` they map will NOT be deallocated upon unmapping. /// - /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`. + /// Consumes the given `AllocatedPages` and returns a `MappedPages` object + /// which contains those `AllocatedPages`. #[doc(hidden)] pub unsafe fn map_to_non_exclusive>( mapper: &mut Self, @@ -292,48 +317,10 @@ impl Mapper { frames: &AllocatedFrames, flags: F, ) -> Result { - let flags = flags.into(); - let top_level_flags = flags.valid(true) - // P4, P3, and P2 entries should never set NOT_EXECUTABLE; only the lowest-level P1 entry should. - .executable(true) - // Currently we cannot use the EXCLUSIVE bit for page table frames (P4, P3, P2), - // because another page table frame may re-use (create another alias for) it without us knowing here. - .exclusive(false); - - // In fact, in this function, none of the frames can be mapped as exclusive + // In this function, none of the frames can be mapped as exclusive // because we're accepting a *reference* to an `AllocatedFrames`, not consuming it. - let actual_flags = flags - .exclusive(false) - .valid(true); - - let pages_count = pages.size_in_pages(); - let frames_count = frames.size_in_frames(); - if pages_count != frames_count { - error!("map_to_non_exclusive(): pages {:?} count {} must equal frames {:?} count {}!", - pages, pages_count, frames, frames_count - ); - return Err("map_to_non_exclusive(): page count must equal frame count"); - } - - // iterate over pages and frames in lockstep - for (page, frame) in pages.deref().clone().into_iter().zip(frames.into_iter()) { - let p3 = mapper.p4_mut().next_table_create(page.p4_index(), top_level_flags); - let p2 = p3.next_table_create(page.p3_index(), top_level_flags); - let p1 = p2.next_table_create(page.p2_index(), top_level_flags); - - if !p1[page.p1_index()].is_unused() { - error!("map_to_non_exclusive(): page {:#X} -> frame {:#X}, page was already in use!", page.start_address(), frame.start_address()); - return Err("map_to_non_exclusive(): page was already in use"); - } - - p1[page.p1_index()].set_entry(frame, actual_flags); - } - - Ok(MappedPages { - page_table_p4: mapper.target_p4.clone(), - pages, - flags: actual_flags, - }) + mapper.internal_map_to(pages, Borrowed(frames), flags) + .map(|(mp, _af)| mp) } } @@ -484,7 +471,7 @@ impl MappedPages { warn!("MappedPages::deep_copy() has not been adequately tested yet."); let size_in_pages = self.size_in_pages(); - use paging::allocate_pages; + use crate::paging::allocate_pages; let new_pages = allocate_pages(size_in_pages).ok_or_else(|| "Couldn't allocate_pages()")?; // we must temporarily map the new pages as Writable, since we're about to copy data into them @@ -512,10 +499,11 @@ impl MappedPages { } - /// Change the permissions (`new_flags`) of this `MappedPages`'s page table entries. 
+ /// Change the mapping flags of this `MappedPages`'s page table entries.
 ///
 /// Note that attempting to change certain "reserved" flags will have no effect.
- /// For example, arbitrarily setting the `EXCLUSIVE` bit would cause unsafety, so it cannot be changed.
+ /// For example, the `EXCLUSIVE` flag cannot be changed because arbitrarily setting it
+ /// would violate safety.
 pub fn remap<F: Into<PteFlagsArch>>(
 &mut self,
 active_table_mapper: &mut Mapper,
 new_flags: F,
 ) -> Result<(), &'static str> {
 if self.size_in_pages() == 0 { return Ok(()); }

- // Use the existing value of the `EXCLUSIVE` flag rather than whatever value was passed in.
- let mut new_flags = new_flags;
- new_flags.set(EntryFlags::EXCLUSIVE, self.flags.is_exclusive());
+ // Use the existing value of the `EXCLUSIVE` flag, ignoring whatever value was passed in.
+ // Also ensure these flags are PRESENT (valid), since they are currently being mapped.
+ let new_flags = new_flags.into()
+ .exclusive(self.flags.is_exclusive())
+ .valid(true);

 if new_flags == self.flags {
 trace!("remap(): new_flags were the same as existing flags, doing nothing.");
@@ -539,7 +529,7 @@ impl MappedPages {
 .and_then(|p2| p2.next_table_mut(page.p2_index()))
 .ok_or("mapping code does not support huge pages")?;

- p1[page.p1_index()].set_flags(new_flags | EntryFlags::PRESENT);
+ p1[page.p1_index()].set_flags(new_flags);
 tlb_flush_virt_addr(page.start_address());
 }
diff --git a/kernel/memory/src/paging/mod.rs b/kernel/memory/src/paging/mod.rs
index f7ea9d0efe..0db0641ec3 100644
--- a/kernel/memory/src/paging/mod.rs
+++ b/kernel/memory/src/paging/mod.rs
@@ -28,10 +28,12 @@ use core::{
 ops::{Deref, DerefMut},
 fmt,
 };
+use log::debug;
 use super::{Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress,
 AllocatedPages, allocate_pages, AllocatedFrames, PteFlags,
 tlb_flush_all, tlb_flush_virt_addr, get_p4, find_section_memory_bounds,
 get_vga_mem_addr, KERNEL_OFFSET};
+use pte_flags::PteFlagsArch;
 use no_drop::NoDrop;
 use kernel_config::memory::{RECURSIVE_P4_INDEX};
// use kernel_config::memory::{KERNEL_TEXT_P4_INDEX, KERNEL_HEAP_P4_INDEX, KERNEL_STACK_P4_INDEX};
@@ -101,7 +103,10 @@ impl PageTable {
 let mut temporary_page = TemporaryPage::create_and_map_table_frame(page, new_p4_frame, current_page_table)?;
 temporary_page.with_table_and_frame(|table, frame| {
 table.zero();
- table[RECURSIVE_P4_INDEX].set_entry(frame.as_allocated_frame(), PteFlags::new_writable());
+ table[RECURSIVE_P4_INDEX].set_entry(
+ frame.as_allocated_frame(),
+ PteFlagsArch::new().valid(true).writable(true),
+ );
 })?;
 let (_temp_page, inited_new_p4_frame) = temporary_page.unmap_into_parts(current_page_table)?;
@@ -136,7 +141,10 @@ impl PageTable {
 let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?;
 // overwrite recursive mapping
- self.p4_mut()[RECURSIVE_P4_INDEX].set_entry(other_table.p4_table.as_allocated_frame(), PteFlags::new_writable());
+ self.p4_mut()[RECURSIVE_P4_INDEX].set_entry(
+ other_table.p4_table.as_allocated_frame(),
+ PteFlagsArch::new().valid(true).writable(true),
+ );
 tlb_flush_all();
 // set mapper's target frame to reflect that future mappings will be mapped into the other_table
@@ -150,7 +158,10 @@ impl PageTable {
 // restore recursive mapping to original p4 table
 temporary_page.with_table_and_frame(|p4_table, frame| {
- p4_table[RECURSIVE_P4_INDEX].set_entry(frame.as_allocated_frame(), PteFlags::new_writable());
+ p4_table[RECURSIVE_P4_INDEX].set_entry(
+ frame.as_allocated_frame(),
+
PteFlagsArch::new().valid(true).writable(true), + ); })?; tlb_flush_all(); diff --git a/kernel/memory/src/paging/table.rs b/kernel/memory/src/paging/table.rs index b832bb2a91..b2ecdca006 100644 --- a/kernel/memory/src/paging/table.rs +++ b/kernel/memory/src/paging/table.rs @@ -7,11 +7,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::PageTableEntry; -use kernel_config::memory::{PAGE_SHIFT, ENTRIES_PER_PAGE_TABLE}; -use super::super::{VirtualAddress, PteFlags}; use core::ops::{Index, IndexMut}; use core::marker::PhantomData; +use super::PageTableEntry; +use crate::VirtualAddress; +use pte_flags::PteFlagsArch; +use kernel_config::memory::{PAGE_SHIFT, ENTRIES_PER_PAGE_TABLE}; use zerocopy::FromBytes; @@ -51,7 +52,7 @@ impl Table { /// and so on for P3 -> P3 and P2 -> P1. fn next_table_address(&self, index: usize) -> Option { let pte_flags = self[index].flags(); - if entry_flags.contains(EntryFlags::PRESENT) && !entry_flags.is_huge() { + if pte_flags.is_valid() && !pte_flags.is_huge() { let table_address = self as *const _ as usize; let next_table_vaddr: usize = (table_address << 9) | (index << PAGE_SHIFT); Some(VirtualAddress::new_canonical(next_table_vaddr)) @@ -84,12 +85,15 @@ impl Table { pub fn next_table_create( &mut self, index: usize, - flags: EntryFlags, + flags: PteFlagsArch, ) -> &mut Table { if self.next_table(index).is_none() { assert!(!self[index].flags().is_huge(), "mapping code does not support huge pages"); let af = frame_allocator::allocate_frames(1).expect("next_table_create(): no frames available"); - self[index].set_entry(af.as_allocated_frame(), flags.into_writable() | EntryFlags::PRESENT); // must be PRESENT | WRITABLE for x86_64 + self[index].set_entry( + af.as_allocated_frame(), + flags.valid(true).writable(true), // must be valid and writable on x86_64 + ); self.next_table_mut(index).unwrap().zero(); core::mem::forget(af); // we currently forget frames allocated as page table frames since we don't yet have a way to track them. } diff --git a/kernel/memory/src/paging/temporary_page.rs b/kernel/memory/src/paging/temporary_page.rs index ff06d2420d..37cece5bcf 100644 --- a/kernel/memory/src/paging/temporary_page.rs +++ b/kernel/memory/src/paging/temporary_page.rs @@ -8,11 +8,14 @@ // except according to those terms. 
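A note on the address arithmetic in `next_table_address()` above: it relies on the recursive-mapping trick. Because one P4 slot points back at the P4 frame itself, shifting a table's virtual address left by 9 bits and OR-ing in an entry index yields the virtual address of the next table down. A worked sketch for x86_64 with 4 KiB pages; the concrete address below assumes `RECURSIVE_P4_INDEX` is 510, and the canonical sign-extension fixup performed by `VirtualAddress::new_canonical` is omitted:

```rust
const PAGE_SHIFT: usize = 12; // 4 KiB pages

/// Mirrors the computation in `next_table_address()`, minus validity checks.
fn next_table_vaddr(table_address: usize, index: usize) -> usize {
    (table_address << 9) | (index << PAGE_SHIFT)
}

fn main() {
    // With the recursive entry in P4 slot 510, the P4 table itself is
    // reachable at the address whose four 9-bit index fields are all 510:
    let p4 = 0xFFFF_FF7F_BFDF_E000_usize;
    // One application of the trick maps "P4 address + slot 5" to the
    // virtual address of the P3 table installed in that slot.
    let p3 = next_table_vaddr(p4, 5);
    println!("P3 table for P4 slot 5: {p3:#x}");
}
```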
use core::mem::ManuallyDrop; +use log::error; use super::{ AllocatedPages, AllocatedFrames, PageTable, MappedPages, VirtualAddress, table::{Table, Level1}, }; +use pte_flags::PteFlagsArch; use kernel_config::memory::{TEMPORARY_PAGE_VIRT_ADDR, PAGE_SIZE}; +use owned_borrowed_trait::Owned; /// A page that can be temporarily mapped to the recursive page table frame, @@ -56,8 +59,8 @@ impl TemporaryPage { } let (mapped_page, frame) = page_table.internal_map_to( page.ok_or("Couldn't allocate a new Page for the temporary P4 table frame")?, - frame, - super::EntryFlags::WRITABLE, + Owned(frame), + PteFlagsArch::new().valid(true).writable(true), )?; Ok(TemporaryPage { mapped_page, diff --git a/kernel/page_table_entry/src/lib.rs b/kernel/page_table_entry/src/lib.rs index c55c8cc3e1..4ef73ae8f3 100644 --- a/kernel/page_table_entry/src/lib.rs +++ b/kernel/page_table_entry/src/lib.rs @@ -14,7 +14,7 @@ use core::ops::Deref; use memory_structs::{Frame, FrameRange, PhysicalAddress}; -use pte_flags::{PteFlagsArch, PTE_FRAME_MASK, PteFlags}; +use pte_flags::{PteFlagsArch, PTE_FRAME_MASK}; use bit_field::BitField; use kernel_config::memory::PAGE_SHIFT; use zerocopy::FromBytes; @@ -100,8 +100,8 @@ impl PageTableEntry { /// /// This does not modify the frame part of the page table entry. pub fn set_flags(&mut self, new_flags: PteFlagsArch) { - let sanitized_flag_bits = new_flags.bits() & !PTE_FRAME_MASK; - self.0 = (self.0 & PTE_FRAME_MASK) | sanitized_flag_bits; + let only_flag_bits = new_flags.bits() & !PTE_FRAME_MASK; + self.0 = (self.0 & PTE_FRAME_MASK) | only_flag_bits; } pub fn value(&self) -> u64 { diff --git a/kernel/pte_flags/src/lib.rs b/kernel/pte_flags/src/lib.rs index 954055d692..bd289669e0 100644 --- a/kernel/pte_flags/src/lib.rs +++ b/kernel/pte_flags/src/lib.rs @@ -190,20 +190,6 @@ impl PteFlags { ) } - /// A convenience function that returns a new `PteFlags` with only the - /// default flags and the [`PteFlags::WRITABLE`] bit set. - /// - /// This is identical to: - /// ```rust - /// PteFlags::new().writable(true) - /// ``` - pub const fn new_writable() -> Self { - Self::from_bits_truncate( - Self::new().bits - | Self::WRITABLE.bits - ) - } - /// Returns a copy of this `PteFlags` with the `VALID` bit set or cleared. /// /// * If `enable` is `true`, this PTE will be considered "present" and "valid", diff --git a/kernel/pte_flags/src/pte_flags_aarch64.rs b/kernel/pte_flags/src/pte_flags_aarch64.rs index 23c02fa73d..89611e84ae 100644 --- a/kernel/pte_flags/src/pte_flags_aarch64.rs +++ b/kernel/pte_flags/src/pte_flags_aarch64.rs @@ -236,20 +236,6 @@ impl PteFlagsAarch64 { ) } - /// A convenience function that returns a new `PteFlagsAarch64` with only the - /// default flags set and the [`PteFlagsAarch64::READ_ONLY`] bit not set (meaning "writable"). - /// - /// This is identical to: - /// ```rust - /// PteFlagsAarch64::new().writable(true) - /// ``` - pub const fn new_writable() -> Self { - Self::from_bits_truncate( - Self::new().bits - & !Self::READ_ONLY.bits - ) - } - /// Returns a copy of this `PteFlagsAarch64` with the `VALID` bit set or cleared. 
/// /// * If `enable` is `true`, this PTE will be considered "present" and "valid", diff --git a/kernel/pte_flags/src/pte_flags_x86_64.rs b/kernel/pte_flags/src/pte_flags_x86_64.rs index 812006ccbf..6ddc42f395 100644 --- a/kernel/pte_flags/src/pte_flags_x86_64.rs +++ b/kernel/pte_flags/src/pte_flags_x86_64.rs @@ -135,20 +135,6 @@ impl PteFlagsX86_64 { Self::NOT_EXECUTABLE } - /// A convenience function that returns a new `PteFlagsX86_64` with only the - /// default flags and the [`PteFlagsX86_64::WRITABLE`] bit set. - /// - /// This is identical to: - /// ```rust - /// PteFlagsX86_64::new().writable(true) - /// ``` - pub const fn new_writable() -> Self { - Self::from_bits_truncate( - Self::new().bits - | Self::WRITABLE.bits - ) - } - /// Returns a copy of this `PteFlagsX86_64` with the `VALID` bit set or cleared. /// /// * If `enable` is `true`, this PTE will be considered "present" and "valid", @@ -264,6 +250,10 @@ impl PteFlagsX86_64 { self.contains(Self::ACCESSED) } + pub fn is_huge(&self) -> bool { + self.contains(Self::HUGE_PAGE) + } + pub fn is_exclusive(&self) -> bool { self.contains(Self::EXCLUSIVE) } diff --git a/libs/owned_borrowed_trait/Cargo.lock b/libs/owned_borrowed_trait/Cargo.lock new file mode 100644 index 0000000000..a492fd47a5 --- /dev/null +++ b/libs/owned_borrowed_trait/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "owned_borrowed_trait" +version = "0.1.0" diff --git a/libs/owned_borrowed_trait/Cargo.toml b/libs/owned_borrowed_trait/Cargo.toml new file mode 100644 index 0000000000..7d4c89c607 --- /dev/null +++ b/libs/owned_borrowed_trait/Cargo.toml @@ -0,0 +1,8 @@ +[package] +authors = ["Kevin Boos "] +name = "owned_borrowed_trait" +description = """ +An abstraction over an owned value or borrowed reference +based on traits and marker types instead of enums. +""" +version = "0.1.0" diff --git a/libs/owned_borrowed_trait/src/lib.rs b/libs/owned_borrowed_trait/src/lib.rs new file mode 100644 index 0000000000..aef41b16dd --- /dev/null +++ b/libs/owned_borrowed_trait/src/lib.rs @@ -0,0 +1,87 @@ +//! An abstraction over an owned value or borrowed reference +//! based on traits and marker types instead of enums. + +#![no_std] + +use core::{ + borrow::Borrow, + ops::Deref, +}; + +/// A trait for abstracting over an owned value or borrowed reference to a type `T`. +/// +/// You cannot implement this trait; it can only be used with [`Owned`] or [`Borrowed`]. +/// +/// The [`Owned`] and [`Borrowed`] wrapper types implement the following traits: +/// * [`AsRef`]. +/// * [`Deref`] where `Target = T`. +pub trait OwnedOrBorrowed: private::Sealed { + /// * `true` if the wrapper type contains an owned value, i.e., for [`Owned`]. + /// * `false` if the wrapper type contains a borrowed reference, i.e., for [`Borrowed`]. + const OWNED: bool; + /// The inner type of the owned value or borrowed reference. + type Inner: Borrow; + + /// Consumes this wrapper type and returns the contained value or borrowed reference. + fn into_inner(self) -> Self::Inner; + + /// Returns a reference to the inner value. + fn as_inner(&self) -> &Self::Inner; +} + +/// A wrapper that indicates the contained value is an owned value of type `T`. +/// +/// Implements the [`OwnedOrBorrowed`] trait. +pub struct Owned(pub T); + +/// A wrapper that indicates the contained value is a borrowed reference +/// to a value of type `T`. +/// +/// Implements the [`OwnedOrBorrowed`] trait. 
+pub struct Borrowed<'t, T>(pub &'t T);
+
+impl<T> OwnedOrBorrowed<T> for Owned<T> {
+    const OWNED: bool = true;
+    type Inner = T;
+    fn into_inner(self) -> Self::Inner { self.0 }
+    fn as_inner(&self) -> &Self::Inner { &self.0 }
+}
+
+impl<'t, T> OwnedOrBorrowed<T> for Borrowed<'t, T> {
+    const OWNED: bool = false;
+    type Inner = &'t T;
+    fn into_inner(self) -> Self::Inner { self.0 }
+    fn as_inner(&self) -> &Self::Inner { &self.0 }
+}
+
+impl<T> AsRef<T> for Owned<T> {
+    fn as_ref(&self) -> &T {
+        self.as_inner().borrow()
+    }
+}
+impl<T> Deref for Owned<T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        self.as_ref()
+    }
+}
+
+impl<'t, T> AsRef<T> for Borrowed<'t, T> {
+    fn as_ref(&self) -> &T {
+        self.as_inner().borrow()
+    }
+}
+impl<'t, T> Deref for Borrowed<'t, T> {
+    type Target = T;
+    fn deref(&self) -> &Self::Target {
+        self.as_ref()
+    }
+}
+
+// Ensure no foreign crates can implement the `OwnedOrBorrowed` trait.
+impl<T> private::Sealed for Owned<T> { }
+impl<'t, T> private::Sealed for Borrowed<'t, T> { }
+
+mod private {
+    pub trait Sealed { }
+}

From fd5042bc2ca5a59ca1ec4d761904c35a055d963a Mon Sep 17 00:00:00 2001
From: Kevin Boos
Date: Sat, 3 Dec 2022 15:07:01 -0800
Subject: [PATCH 3/8] main repo crates build and run properly

---
 Cargo.lock                                |  1 +
 applications/loadc/src/lib.rs             |  6 ++--
 kernel/acpi/acpi_table/src/lib.rs         |  8 ++---
 kernel/acpi/hpet/src/lib.rs               |  6 ++--
 kernel/acpi/rsdp/src/lib.rs               |  4 +--
 kernel/apic/src/lib.rs                    |  4 +--
 kernel/crate_metadata/Cargo.toml          |  1 +
 kernel/crate_metadata/src/lib.rs          | 39 ++++++++++++++++-------
 kernel/crate_swap/src/lib.rs              |  6 ++--
 kernel/debug_info/src/lib.rs              | 15 ++++++---
 kernel/framebuffer/src/lib.rs             |  8 +++--
 kernel/heap/src/lib.rs                    | 10 ++++--
 kernel/ioapic/src/lib.rs                  |  4 +--
 kernel/iommu/src/lib.rs                   |  4 +--
 kernel/memfs/src/lib.rs                   |  4 +--
 kernel/mod_mgmt/src/lib.rs                | 28 ++++++++++------
 kernel/multicore_bringup/src/lib.rs       |  7 ++--
 kernel/nic_buffers/src/lib.rs             |  4 +--
 kernel/nic_initialization/src/lib.rs      | 11 ++-----
 kernel/nic_queues/src/lib.rs              | 11 +++----
 kernel/pte_flags/src/lib.rs               | 14 ++++----
 kernel/pte_flags/src/pte_flags_aarch64.rs | 14 ++++----
 kernel/pte_flags/src/pte_flags_x86_64.rs  | 16 +++++-----
 kernel/stack/src/lib.rs                   | 10 ++----
 24 files changed, 132 insertions(+), 103 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f6e6b5e135..f5b9eceee8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -561,6 +561,7 @@ dependencies = [
 "qp-trie",
 "serde",
 "spin 0.9.0",
+ "static_assertions",
 "str_ref",
 "xmas-elf",
 ]
diff --git a/applications/loadc/src/lib.rs b/applications/loadc/src/lib.rs
index 607be639b6..9ba58a7316 100644
--- a/applications/loadc/src/lib.rs
+++ b/applications/loadc/src/lib.rs
@@ -24,7 +24,7 @@ use core::{
 };
 use alloc::{collections::BTreeSet, string::{String, ToString}, sync::Arc, vec::Vec};
 use getopts::{Matches, Options};
-use memory::{Page, MappedPages, VirtualAddress, PteFlags};
+use memory::{Page, MappedPages, VirtualAddress, PteFlagsArch, PteFlags};
 use mod_mgmt::{CrateNamespace, StrongDependency, find_symbol_table, RelocationEntry, write_relocation};
 use rustc_demangle::demangle;
 use path::Path;
@@ -140,7 +140,7 @@ pub struct LoadedSegment {
 /// (may be a subset)
 bounds: Range<VirtualAddress>,
 /// The proper flags for this segment specified by the ELF file.
- flags: PteFlags,
+ flags: PteFlagsArch,
 /// The indices of the sections in the ELF file
 /// that were grouped ("mapped") into this segment by the linker.
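Stepping back to the `owned_borrowed_trait` crate completed above: its whole purpose is to let `internal_map_to()` accept either owned `AllocatedFrames` or a borrowed `&AllocatedFrames` through a single code path, with the compile-time `OWNED` constant deciding whether the resulting mapping may claim the `EXCLUSIVE` bit. A self-contained sketch of the same pattern; `Frames` and `map()` are stand-ins invented for illustration:

```rust
use owned_borrowed_trait::{Borrowed, Owned, OwnedOrBorrowed};

struct Frames; // stand-in for `AllocatedFrames`

/// Generic over whether the caller hands over the frames or merely lends
/// them; returns whether the mapping may be exclusive, plus the inner value.
fn map<F: OwnedOrBorrowed<Frames>>(frames: F) -> (bool, F::Inner) {
    // `F::OWNED` is a constant, so this "decision" costs nothing at runtime.
    (F::OWNED, frames.into_inner())
}

fn main() {
    let (exclusive, _frames_back) = map(Owned(Frames));
    assert!(exclusive); // owned frames: an exclusive mapping is allowed

    let frames = Frames;
    let (exclusive, _borrow_back) = map(Borrowed(&frames));
    assert!(!exclusive); // borrowed frames: never exclusive
}
```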
section_ndxs: BTreeSet, @@ -318,7 +318,7 @@ fn parse_and_load_elf_executable<'f>( mapped_segments.push(LoadedSegment { mp, bounds: segment_bounds, - flags: initial_flags, + flags: initial_flags.into(), section_ndxs, sections_i_depend_on: Vec::new(), // this is populated later in `overwrite_relocations()` }); diff --git a/kernel/acpi/acpi_table/src/lib.rs b/kernel/acpi/acpi_table/src/lib.rs index d80549af68..562e55e6ac 100644 --- a/kernel/acpi/acpi_table/src/lib.rs +++ b/kernel/acpi/acpi_table/src/lib.rs @@ -14,7 +14,7 @@ extern crate sdt; extern crate zerocopy; use alloc::collections::BTreeMap; -use memory::{MappedPages, allocate_pages, allocate_frames_at, PageTable, EntryFlags, PhysicalAddress, Frame, FrameRange}; +use memory::{MappedPages, allocate_pages, allocate_frames_at, PageTable, PteFlags, PhysicalAddress, Frame, FrameRange}; use sdt::Sdt; use core::ops::Add; use zerocopy::FromBytes; @@ -89,7 +89,7 @@ impl AcpiTables { let new_mapped_pages = page_table.map_allocated_pages_to( new_pages, af, - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, + PteFlags::new().valid(true).writable(true), )?; self.adjust_mapping_offsets(new_frames, new_mapped_pages); @@ -114,7 +114,7 @@ impl AcpiTables { let new_mapped_pages = page_table.map_allocated_pages_to( new_pages, af, - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, + PteFlags::new().valid(true).writable(true), )?; self.adjust_mapping_offsets(new_frames, new_mapped_pages); @@ -142,7 +142,7 @@ impl AcpiTables { let new_mapped_pages = page_table.map_allocated_pages_to( new_pages, af, - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE, + PteFlags::new().valid(true).writable(true), )?; // No real need to adjust mapping offsets here, since we've only appended frames (not prepended); // we call this just to set the new frames and new mapped pages diff --git a/kernel/acpi/hpet/src/lib.rs b/kernel/acpi/hpet/src/lib.rs index 91e722b8e9..e05814e216 100644 --- a/kernel/acpi/hpet/src/lib.rs +++ b/kernel/acpi/hpet/src/lib.rs @@ -6,7 +6,7 @@ use log::debug; use volatile::{Volatile, ReadOnly}; use zerocopy::FromBytes; use spin::{Once, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use memory::{allocate_pages, allocate_frames_by_bytes_at, PageTable, PhysicalAddress, EntryFlags, BorrowedMappedPages, Mutable}; +use memory::{allocate_pages, allocate_frames_by_bytes_at, PageTable, PhysicalAddress, PteFlags, BorrowedMappedPages, Mutable}; use sdt::{Sdt, GenericAddressStructure}; use acpi_table::{AcpiTables, AcpiSignature}; use static_assertions::const_assert_eq; @@ -175,8 +175,8 @@ impl HpetAcpiTable { .ok_or("Couldn't allocate pages for HPET")?; let hpet_mp = page_table.map_allocated_pages_to( pages, - frames, - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE, + frames, + PteFlags::new().valid(true).writable(true).device_memory(true), )?; let mut hpet = hpet_mp.into_borrowed_mut::(phys_addr.frame_offset()) diff --git a/kernel/acpi/rsdp/src/lib.rs b/kernel/acpi/rsdp/src/lib.rs index 88a0d26d22..5b10c09c2c 100644 --- a/kernel/acpi/rsdp/src/lib.rs +++ b/kernel/acpi/rsdp/src/lib.rs @@ -7,7 +7,7 @@ extern crate zerocopy; #[macro_use] extern crate static_assertions; use core::mem; -use memory::{PageTable, MappedPages, PhysicalAddress, allocate_pages_by_bytes, allocate_frames_by_bytes_at, EntryFlags, BorrowedMappedPages}; +use memory::{PageTable, MappedPages, PhysicalAddress, allocate_pages_by_bytes, allocate_frames_by_bytes_at, PteFlags, BorrowedMappedPages}; use 
zerocopy::FromBytes; /// The starting physical address of the region of memory where the RSDP table exists. @@ -48,7 +48,7 @@ impl Rsdp { let pages = allocate_pages_by_bytes(size).ok_or("couldn't allocate pages")?; let frames_to_search = allocate_frames_by_bytes_at(PhysicalAddress::new_canonical(RSDP_SEARCH_START), size) .map_err(|_e| "Couldn't allocate physical frames when searching for RSDP")?; - let mapped_pages = page_table.map_allocated_pages_to(pages, frames_to_search, EntryFlags::PRESENT)?; + let mapped_pages = page_table.map_allocated_pages_to(pages, frames_to_search, PteFlags::new().valid(true))?; Rsdp::search(mapped_pages) } diff --git a/kernel/apic/src/lib.rs b/kernel/apic/src/lib.rs index cc4a6e439a..371d13d5c6 100644 --- a/kernel/apic/src/lib.rs +++ b/kernel/apic/src/lib.rs @@ -8,7 +8,7 @@ use spin::Once; use raw_cpuid::CpuId; use msr::*; use irq_safety::RwLockIrqSafe; -use memory::{PageTable, PhysicalAddress, EntryFlags, MappedPages, allocate_pages, allocate_frames_at, AllocatedFrames, BorrowedMappedPages, Mutable}; +use memory::{PageTable, PhysicalAddress, PteFlags, MappedPages, allocate_pages, allocate_frames_at, AllocatedFrames, BorrowedMappedPages, Mutable}; use kernel_config::time::CONFIG_TIMESLICE_PERIOD_MICROSECONDS; use atomic_linked_list::atomic_map::AtomicMap; use crossbeam_utils::atomic::AtomicCell; @@ -158,7 +158,7 @@ fn map_apic(page_table: &mut PageTable) -> Result { page_table, new_page, frame, - EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE, + PteFlags::new().valid(true).writable(true).device_memory(true), ) } } diff --git a/kernel/crate_metadata/Cargo.toml b/kernel/crate_metadata/Cargo.toml index 7d69837f22..fdaf503f7c 100644 --- a/kernel/crate_metadata/Cargo.toml +++ b/kernel/crate_metadata/Cargo.toml @@ -9,6 +9,7 @@ edition = "2018" spin = "0.9.0" xmas-elf = { version = "0.6.2", git = "https://github.com/theseus-os/xmas-elf.git" } qp-trie = "0.8.0" +static_assertions = "1.1.0" [dependencies.str_ref] path = "../../libs/str_ref" diff --git a/kernel/crate_metadata/src/lib.rs b/kernel/crate_metadata/src/lib.rs index 9e07ee1405..364a1d8a6a 100644 --- a/kernel/crate_metadata/src/lib.rs +++ b/kernel/crate_metadata/src/lib.rs @@ -57,13 +57,12 @@ use alloc::{ sync::{Arc, Weak}, vec::Vec, }; -use memory::{MappedPages, VirtualAddress, EntryFlags}; -#[cfg(internal_deps)] -use memory::PageTable; +use memory::{MappedPages, VirtualAddress, PteFlags}; use cow_arc::{CowArc, CowWeak}; use fs_node::{FileRef, WeakFileRef}; use hashbrown::HashMap; use goblin::elf::reloc::*; +use static_assertions::const_assert; pub use str_ref::StrRef; pub use crate_metadata_serde::{ @@ -90,11 +89,27 @@ pub type StrongSectionRef = Arc; pub type WeakSectionRef = Weak; /// `.text` sections are read-only and executable. -pub const TEXT_SECTION_FLAGS: EntryFlags = EntryFlags::PRESENT; +pub const TEXT_SECTION_FLAGS: PteFlags = PteFlags::from_bits_truncate( + (PteFlags::new().bits() | PteFlags::VALID.bits()) + & !PteFlags::NOT_EXECUTABLE.bits() // clear the no-exec bits +); /// `.rodata` sections are read-only and non-executable. -pub const RODATA_SECTION_FLAGS: EntryFlags = EntryFlags::from_bits_truncate(EntryFlags::PRESENT.bits() | EntryFlags::NO_EXECUTE.bits()); +pub const RODATA_SECTION_FLAGS: PteFlags = PteFlags::from_bits_truncate( + PteFlags::new().bits() + | PteFlags::VALID.bits() + & !PteFlags::WRITABLE.bits() +); /// `.data` and `.bss` sections are read-write and non-executable. 
-pub const DATA_BSS_SECTION_FLAGS: EntryFlags = EntryFlags::from_bits_truncate(EntryFlags::PRESENT.bits() | EntryFlags::NO_EXECUTE.bits() | EntryFlags::WRITABLE.bits()); +pub const DATA_BSS_SECTION_FLAGS: PteFlags = PteFlags::from_bits_truncate( + PteFlags::new().bits() + | PteFlags::VALID.bits() + | PteFlags::WRITABLE.bits() +); + +// Double-check section flags were defined correctly. +const_assert!(TEXT_SECTION_FLAGS.is_executable() && !TEXT_SECTION_FLAGS.is_writable()); +const_assert!(!RODATA_SECTION_FLAGS.is_writable() && !RODATA_SECTION_FLAGS.is_executable()); +const_assert!(DATA_BSS_SECTION_FLAGS.is_writable() && !DATA_BSS_SECTION_FLAGS.is_executable()); /// The Theseus Makefile appends prefixes onto bootloader module names, @@ -379,19 +394,19 @@ impl LoadedCrate { #[cfg(internal_deps)] pub fn deep_copy( &self, - page_table: &mut PageTable, + page_table: &mut memory::PageTable, ) -> Result { // This closure deep copies the given mapped_pages (mapping them as WRITABLE) // and recalculates the the range of addresses covered by the new mapping. - let mut deep_copy_mp = |old_mp_range: &(Arc>, Range), flags: EntryFlags| + let mut deep_copy_mp = |old_mp_range: &(Arc>, Range), flags: PteFlags| -> Result<(Arc>, Range), &'static str> { let old_mp_locked = old_mp_range.0.lock(); let old_start_address = old_mp_range.1.start.value(); let size = old_mp_range.1.end.value() - old_start_address; let offset = old_start_address - old_mp_locked.start_address().value(); - let new_mp = old_mp_range.0.lock().deep_copy(Some(flags | EntryFlags::WRITABLE), page_table)?; + let new_mp = old_mp_range.0.lock().deep_copy(page_table, Some(flags.writable(true)))?; let new_start_address = new_mp.start_address() + offset; Ok((Arc::new(Mutex::new(new_mp)), new_start_address .. (new_start_address + size))) }; @@ -523,7 +538,7 @@ impl LoadedCrate { strong_dep.relocation, new_sec_slice, new_sec_mapped_pages_offset, - source_sec.start_address(), + source_sec.virt_addr, true )?; @@ -549,10 +564,10 @@ impl LoadedCrate { // to ensure that we don't cause deadlock by trying to lock the same section twice. 
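One non-obvious detail in the section-flag constants above: they are composed at the raw `bits()` level rather than with the builder methods, because the builders (`.valid(true)`, `.writable(true)`, and so on) are not `const fn`s, while `new()`, `bits()`, `from_bits_truncate()`, and (after this patch) the `is_*()` queries are. The `const_assert!` lines then verify the hand-assembled bits at compile time. The same pattern extends to any new permission constant; a hypothetical example:

```rust
use pte_flags::PteFlags;
use static_assertions::const_assert;

/// Hypothetical constant for writable, non-executable stack-like mappings.
pub const STACK_SECTION_FLAGS: PteFlags = PteFlags::from_bits_truncate(
    PteFlags::new().bits()        // defaults, including NOT_EXECUTABLE
    | PteFlags::VALID.bits()
    | PteFlags::WRITABLE.bits()
);

// Catch bit-twiddling mistakes at compile time, not at the first page fault.
const_assert!(STACK_SECTION_FLAGS.is_valid());
const_assert!(STACK_SECTION_FLAGS.is_writable());
const_assert!(!STACK_SECTION_FLAGS.is_executable());
```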
let source_sec_vaddr = if Arc::ptr_eq(source_sec, new_sec) { // here: the source_sec and new_sec are the same, so just use the already-locked new_sec - new_sec.start_address() + new_sec.virt_addr } else { // here: the source_sec and new_sec are different, so we can go ahead and safely lock the source_sec - source_sec.start_address() + source_sec.virt_addr }; write_relocation( internal_dep.relocation, diff --git a/kernel/crate_swap/src/lib.rs b/kernel/crate_swap/src/lib.rs index 78d6c8a0f7..6064a95f87 100644 --- a/kernel/crate_swap/src/lib.rs +++ b/kernel/crate_swap/src/lib.rs @@ -23,7 +23,6 @@ use core::{ fmt, ops::Deref, }; -use spin::Mutex; use alloc::{ borrow::Cow, collections::BTreeSet, @@ -31,8 +30,9 @@ use alloc::{ sync::Arc, vec::Vec, }; +use spin::Mutex; use hashbrown::HashMap; -use memory::{EntryFlags, MmiRef}; +use memory::MmiRef; use fs_node::{FsNode, FileOrDir, FileRef, DirRef}; use mod_mgmt::{ CrateNamespace, @@ -428,7 +428,7 @@ pub fn swap_crates( let mut target_sec_mapped_pages = target_sec.mapped_pages.lock(); let target_sec_initial_flags = target_sec_mapped_pages.flags(); if !target_sec_initial_flags.is_writable() { - target_sec_mapped_pages.remap(&mut kernel_mmi_ref.lock().page_table, target_sec_initial_flags | EntryFlags::WRITABLE)?; + target_sec_mapped_pages.remap(&mut kernel_mmi_ref.lock().page_table, target_sec_initial_flags.writable(true))?; } write_relocation( diff --git a/kernel/debug_info/src/lib.rs b/kernel/debug_info/src/lib.rs index e0363744b5..93059c0c3f 100644 --- a/kernel/debug_info/src/lib.rs +++ b/kernel/debug_info/src/lib.rs @@ -30,7 +30,7 @@ use alloc::{ }; use fs_node::WeakFileRef; use owning_ref::ArcRef; -use memory::{MappedPages, VirtualAddress, MmiRef, allocate_pages_by_bytes, EntryFlags}; +use memory::{MappedPages, VirtualAddress, MmiRef, allocate_pages_by_bytes, PteFlags}; use xmas_elf::{ ElfFile, sections::{SectionData, SectionData::Rela64, ShType}, @@ -799,7 +799,10 @@ impl DebugSymbols { // The .debug sections were initially mapped as writable so we could modify them, // but they should actually just be read-only as specified by the ELF file flags. - debug_sections_mp.remap(&mut kernel_mmi_ref.lock().page_table, EntryFlags::PRESENT)?; + debug_sections_mp.remap( + &mut kernel_mmi_ref.lock().page_table, + PteFlags::new().valid(true), + )?; let debug_sections_mp = Arc::new(debug_sections_mp); let create_debug_section_slice = |debug_sec: DebugSection| -> Result { @@ -879,8 +882,12 @@ fn allocate_debug_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> return Err("no .debug sections found"); } - let allocated_pages = allocate_pages_by_bytes(ro_bytes).ok_or("Couldn't allocate_pages_by_bytes, out of virtual address space")?; - let mp = kernel_mmi_ref.lock().page_table.map_allocated_pages(allocated_pages, EntryFlags::PRESENT | EntryFlags::WRITABLE)?; + let allocated_pages = allocate_pages_by_bytes(ro_bytes) + .ok_or("Couldn't allocate_pages_by_bytes, out of virtual address space")?; + let mp = kernel_mmi_ref.lock().page_table.map_allocated_pages( + allocated_pages, + PteFlags::new().valid(true).writable(true), + )?; let start_address = mp.start_address(); let range = start_address .. 
(start_address + ro_bytes); Ok((mp, range)) diff --git a/kernel/framebuffer/src/lib.rs b/kernel/framebuffer/src/lib.rs index d5544cab3c..55123a1d21 100644 --- a/kernel/framebuffer/src/lib.rs +++ b/kernel/framebuffer/src/lib.rs @@ -13,7 +13,7 @@ extern crate zerocopy; pub mod pixel; use core::{ops::{DerefMut, Deref}, hash::{Hash, Hasher}}; -use memory::{EntryFlags, PhysicalAddress, Mutable, BorrowedSliceMappedPages}; +use memory::{PteFlags, PhysicalAddress, Mutable, BorrowedSliceMappedPages}; use shapes::Coord; pub use pixel::*; @@ -77,8 +77,10 @@ impl Framebuffer
<P: Pixel>
{ // get a reference to the kernel's memory mapping information let kernel_mmi_ref = memory::get_kernel_mmi_ref().ok_or("KERNEL_MMI was not yet initialized!")?; - let vesa_display_flags: EntryFlags = - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::GLOBAL | EntryFlags::CACHE_DISABLE; + let vesa_display_flags: PteFlags = PteFlags::new() + .valid(true) + .writable(true) + .device_memory(true); // TODO: use PAT write-combining instead of disabling caching let size = width * height * core::mem::size_of::
<P>
(); let pages = memory::allocate_pages_by_bytes(size).ok_or("could not allocate pages for a new framebuffer")?; diff --git a/kernel/heap/src/lib.rs b/kernel/heap/src/lib.rs index 2fb652a129..23b7e8bcc7 100644 --- a/kernel/heap/src/lib.rs +++ b/kernel/heap/src/lib.rs @@ -13,7 +13,7 @@ extern crate kernel_config; extern crate block_allocator; use alloc::alloc::{GlobalAlloc, Layout}; -use memory::EntryFlags; +use memory::PteFlags; use kernel_config::memory::{KERNEL_HEAP_START, KERNEL_HEAP_INITIAL_SIZE}; use irq_safety::MutexIrqSafe; use spin::Once; @@ -35,8 +35,12 @@ pub static DEFAULT_ALLOCATOR: Once> = Once::n /// Currently it is initialized with an instance of `MultipleHeaps`. static DEFAULT_ALLOCATOR: Once> = Once::new(); -/// The heap mapped pages should be writable -pub const HEAP_FLAGS: EntryFlags = EntryFlags::WRITABLE; +/// The heap mapped pages should be writable and non-executable. +pub const HEAP_FLAGS: PteFlags = PteFlags::from_bits_truncate( + PteFlags::new().bits() + | PteFlags::VALID.bits() + | PteFlags::WRITABLE.bits() +); /// The ending address of the initial heap. It is used to determine which heap should be used during deallocation. const INITIAL_HEAP_END_ADDR: usize = KERNEL_HEAP_START + KERNEL_HEAP_INITIAL_SIZE; diff --git a/kernel/ioapic/src/lib.rs b/kernel/ioapic/src/lib.rs index 35f67583f1..f6495d19f6 100644 --- a/kernel/ioapic/src/lib.rs +++ b/kernel/ioapic/src/lib.rs @@ -10,7 +10,7 @@ extern crate atomic_linked_list; use spin::{Mutex, MutexGuard}; use volatile::{Volatile, WriteOnly}; use zerocopy::FromBytes; -use memory::{PageTable, PhysicalAddress, EntryFlags, allocate_pages, allocate_frames_at, BorrowedMappedPages, Mutable}; +use memory::{PageTable, PhysicalAddress, PteFlags, allocate_pages, allocate_frames_at, BorrowedMappedPages, Mutable}; use atomic_linked_list::atomic_map::AtomicMap; @@ -76,7 +76,7 @@ impl IoApic { let ioapic_mapped_page = page_table.map_allocated_pages_to( new_page, frame, - EntryFlags::PRESENT | EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE, + PteFlags::new().valid(true).writable(true).device_memory(true), )?; let ioapic_regs = ioapic_mapped_page.into_borrowed_mut(0).map_err(|(_mp, err)| err)?; diff --git a/kernel/iommu/src/lib.rs b/kernel/iommu/src/lib.rs index 4683b947f5..5ab896596f 100644 --- a/kernel/iommu/src/lib.rs +++ b/kernel/iommu/src/lib.rs @@ -16,7 +16,7 @@ extern crate bitflags; use spin::Once; use irq_safety::MutexIrqSafe; -use memory::{PageTable, EntryFlags, PhysicalAddress, allocate_frames_at, allocate_pages, BorrowedMappedPages, Mutable}; +use memory::{PageTable, PteFlags, PhysicalAddress, allocate_frames_at, allocate_pages, BorrowedMappedPages, Mutable}; mod regs; use regs::*; @@ -58,7 +58,7 @@ pub fn init(host_address_width: u8, let mp = { let frames = allocate_frames_at(register_base_address, 1)?; let pages = allocate_pages(1).ok_or("Unable to find virtual page!")?; - let flags = EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE; + let flags = PteFlags::new().valid(true).writable(true).device_memory(true); page_table.map_allocated_pages_to(pages, frames, flags)? 
}; diff --git a/kernel/memfs/src/lib.rs b/kernel/memfs/src/lib.rs index a221757733..6c5c53d730 100644 --- a/kernel/memfs/src/lib.rs +++ b/kernel/memfs/src/lib.rs @@ -15,7 +15,7 @@ extern crate io; use alloc::string::String; use fs_node::{DirRef, WeakDirRef, File, FsNode}; -use memory::{MappedPages, get_kernel_mmi_ref, allocate_pages_by_bytes, EntryFlags}; +use memory::{MappedPages, get_kernel_mmi_ref, allocate_pages_by_bytes, PteFlags}; use alloc::sync::Arc; use spin::Mutex; use fs_node::{FileOrDir, FileRef}; @@ -94,7 +94,7 @@ impl ByteWriter for MemFile { else { // If the mapped pages are empty (this is the first allocation), we make them writable let prev_flags = if self.mp.size_in_bytes() == 0 { - EntryFlags::WRITABLE + PteFlags::new().valid(true).writable(true).into() } // Otherwise, use the existing mapped pages flags else { diff --git a/kernel/mod_mgmt/src/lib.rs b/kernel/mod_mgmt/src/lib.rs index 15947efb7a..5180dfce07 100644 --- a/kernel/mod_mgmt/src/lib.rs +++ b/kernel/mod_mgmt/src/lib.rs @@ -9,7 +9,7 @@ use alloc::{boxed::Box, collections::{BTreeMap, btree_map, BTreeSet}, string::{S use spin::{Mutex, Once}; use xmas_elf::{ElfFile, sections::{SHF_ALLOC, SHF_EXECINSTR, SHF_TLS, SHF_WRITE, SectionData, ShType}, symbol_table::{Binding, Type}}; use util::round_up_power_of_two; -use memory::{MmiRef, MemoryManagementInfo, VirtualAddress, MappedPages, EntryFlags, allocate_pages_by_bytes, allocate_frames_by_bytes_at}; +use memory::{MmiRef, MemoryManagementInfo, VirtualAddress, MappedPages, PteFlags, allocate_pages_by_bytes, allocate_frames_by_bytes_at}; use bootloader_modules::BootloaderModule; use cow_arc::CowArc; use rustc_demangle::demangle; @@ -168,7 +168,7 @@ fn parse_bootloader_modules_into_files( pages, frames, // we never need to write to bootloader-provided modules - EntryFlags::PRESENT | EntryFlags::NO_EXECUTE, + PteFlags::new().valid(true), )?; let name = m.name(); @@ -978,7 +978,7 @@ impl CrateNamespace { let mut target_sec_mapped_pages = target_sec.mapped_pages.lock(); let target_sec_initial_flags = target_sec_mapped_pages.flags(); if !target_sec_initial_flags.is_writable() { - target_sec_mapped_pages.remap(&mut kernel_mmi_ref.lock().page_table, target_sec_initial_flags | EntryFlags::WRITABLE)?; + target_sec_mapped_pages.remap(&mut kernel_mmi_ref.lock().page_table, target_sec_initial_flags.writable(true))?; } write_relocation( @@ -2968,13 +2968,23 @@ fn allocate_section_pages(elf_file: &ElfFile, kernel_mmi_ref: &MmiRef) -> Result } -/// A convenience function for allocating contiguous virtual memory pages and mapping them to random physical frames. +/// A convenience function for allocating virtual pages and mapping them to random physical frames. /// -/// The returned `MappedPages` will be at least as large as `size_in_bytes`, rounded up to the nearest `Page` size, -/// and is mapped as writable along with the other specified `flags` to ensure we can copy content into it. -fn allocate_and_map_as_writable(size_in_bytes: usize, flags: EntryFlags, kernel_mmi_ref: &MmiRef) -> Result { - let allocated_pages = allocate_pages_by_bytes(size_in_bytes).ok_or("Couldn't allocate_pages_by_bytes, out of virtual address space")?; - kernel_mmi_ref.lock().page_table.map_allocated_pages(allocated_pages, flags | EntryFlags::PRESENT | EntryFlags::WRITABLE) +/// The returned `MappedPages` will be at least as large as `size_in_bytes`, +/// rounded up to the nearest `Page` size, +/// and is mapped as writable along with the other specified `flags` +/// to ensure we can copy content into it. 
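The doc comment above describes an idiom that recurs throughout the loader: pages must be writable while content is copied into them, and only afterwards are they `remap()`ed to their intended permissions (as `debug_info` does above with `PteFlags::new().valid(true)`). A condensed sketch of the whole round trip, assuming the `memory` crate API used in this patch; `load_read_only` is an invented helper name and error handling is minimal:

```rust
use memory::{allocate_pages_by_bytes, MappedPages, MmiRef, PteFlags};

/// Copies `contents` into freshly mapped pages, then drops the WRITABLE bit.
fn load_read_only(contents: &[u8], mmi: &MmiRef) -> Result<MappedPages, &'static str> {
    let pages = allocate_pages_by_bytes(contents.len())
        .ok_or("out of virtual address space")?;

    // 1) Map the pages as writable so the copy below is legal.
    let mut mp = mmi.lock().page_table.map_allocated_pages(
        pages,
        PteFlags::new().valid(true).writable(true),
    )?;

    // 2) Copy the content into the new mapping.
    mp.as_slice_mut(0, contents.len())?.copy_from_slice(contents);

    // 3) Restore the intended read-only flags.
    mp.remap(&mut mmi.lock().page_table, PteFlags::new().valid(true))?;
    Ok(mp)
}
```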
+fn allocate_and_map_as_writable( + size_in_bytes: usize, + flags: PteFlags, + kernel_mmi_ref: &MmiRef, +) -> Result { + let allocated_pages = allocate_pages_by_bytes(size_in_bytes) + .ok_or("Couldn't allocate_pages_by_bytes, out of virtual address space")?; + kernel_mmi_ref.lock().page_table.map_allocated_pages( + allocated_pages, + flags.valid(true).writable(true) + ) } diff --git a/kernel/multicore_bringup/src/lib.rs b/kernel/multicore_bringup/src/lib.rs index 8598955b4e..4d85ed6a10 100644 --- a/kernel/multicore_bringup/src/lib.rs +++ b/kernel/multicore_bringup/src/lib.rs @@ -28,7 +28,7 @@ use core::{ use spin::Mutex; use volatile::Volatile; use zerocopy::FromBytes; -use memory::{VirtualAddress, PhysicalAddress, MappedPages, EntryFlags, MmiRef}; +use memory::{VirtualAddress, PhysicalAddress, MappedPages, PteFlags, MmiRef}; use kernel_config::memory::{PAGE_SIZE, PAGE_SHIFT, KERNEL_STACK_SIZE_IN_PAGES}; use apic::{LocalApic, get_lapics, get_my_apic_id, has_x2apic, get_bsp_id, cpu_count}; use ap_start::{kstart_ap, AP_READY_FLAG}; @@ -153,15 +153,16 @@ pub fn handle_ap_cores( let ap_startup_pages = memory::allocate_pages_at(VirtualAddress::new_canonical(AP_STARTUP), ap_startup_frames.size_in_frames()) .map_err(|_e| "handle_ap_cores(): failed to allocate AP startup pages")?; + let flags = PteFlags::new().valid(true).writable(true); trampoline_mapped_pages = page_table.map_allocated_pages_to( trampoline_page, trampoline_frame, - EntryFlags::PRESENT | EntryFlags::WRITABLE, + flags, )?; ap_startup_mapped_pages = page_table.map_allocated_pages_to( ap_startup_pages, ap_startup_frames, - EntryFlags::PRESENT | EntryFlags::WRITABLE, + flags, )?; page_table_phys_addr = page_table.physical_address(); } diff --git a/kernel/nic_buffers/src/lib.rs b/kernel/nic_buffers/src/lib.rs index 79f72904c3..8b9121bebe 100644 --- a/kernel/nic_buffers/src/lib.rs +++ b/kernel/nic_buffers/src/lib.rs @@ -9,7 +9,7 @@ extern crate mpmc; use core::ops::{Deref, DerefMut}; use alloc::vec::Vec; -use memory::{PhysicalAddress, MappedPages, EntryFlags, create_contiguous_mapping}; +use memory::{PhysicalAddress, MappedPages, PteFlags, create_contiguous_mapping}; /// A buffer that stores a packet to be transmitted through the NIC @@ -26,7 +26,7 @@ impl TransmitBuffer { pub fn new(size_in_bytes: u16) -> Result { let (mp, starting_phys_addr) = create_contiguous_mapping( size_in_bytes as usize, - EntryFlags::WRITABLE | EntryFlags::CACHE_DISABLE | EntryFlags::NO_EXECUTE, + PteFlags::new().writable(true).device_memory(true), )?; Ok(TransmitBuffer { mp: mp, diff --git a/kernel/nic_initialization/src/lib.rs b/kernel/nic_initialization/src/lib.rs index ed02e92225..c08824f15e 100644 --- a/kernel/nic_initialization/src/lib.rs +++ b/kernel/nic_initialization/src/lib.rs @@ -14,21 +14,14 @@ extern crate nic_buffers; extern crate volatile; extern crate nic_queues; -use memory::{EntryFlags, PhysicalAddress, allocate_pages_by_bytes, allocate_frames_by_bytes_at, get_kernel_mmi_ref, MappedPages, create_contiguous_mapping, Mutable, BorrowedSliceMappedPages}; +use memory::{PhysicalAddress, allocate_pages_by_bytes, allocate_frames_by_bytes_at, get_kernel_mmi_ref, MappedPages, create_contiguous_mapping, Mutable, BorrowedSliceMappedPages}; use pci::PciDevice; use alloc::vec::Vec; use intel_ethernet::descriptors::{RxDescriptor, TxDescriptor}; use nic_buffers::ReceiveBuffer; use nic_queues::{RxQueueRegisters, TxQueueRegisters}; -/// The mapping flags used for pages that the NIC will map. 
-pub const NIC_MAPPING_FLAGS: EntryFlags = EntryFlags::from_bits_truncate( - EntryFlags::PRESENT.bits() | - EntryFlags::WRITABLE.bits() | - EntryFlags::CACHE_DISABLE.bits() | - EntryFlags::NO_EXECUTE.bits() -); - +pub use nic_queues::NIC_MAPPING_FLAGS; /// Allocates memory for the NIC registers /// diff --git a/kernel/nic_queues/src/lib.rs b/kernel/nic_queues/src/lib.rs index 301ebaffa9..fc91848b64 100644 --- a/kernel/nic_queues/src/lib.rs +++ b/kernel/nic_queues/src/lib.rs @@ -18,16 +18,15 @@ use alloc::{ vec::Vec, collections::VecDeque }; -use memory::{create_contiguous_mapping, EntryFlags, BorrowedSliceMappedPages, Mutable}; +use memory::{create_contiguous_mapping, PteFlags, BorrowedSliceMappedPages, Mutable}; use intel_ethernet::descriptors::{RxDescriptor, TxDescriptor}; use nic_buffers::{ReceiveBuffer, ReceivedFrame, TransmitBuffer}; /// The mapping flags used for pages that the NIC will map. -pub const NIC_MAPPING_FLAGS: EntryFlags = EntryFlags::from_bits_truncate( - EntryFlags::PRESENT.bits() | - EntryFlags::WRITABLE.bits() | - EntryFlags::CACHE_DISABLE.bits() | - EntryFlags::NO_EXECUTE.bits() +pub const NIC_MAPPING_FLAGS: PteFlags = PteFlags::from_bits_truncate( + PteFlags::new().bits() + | PteFlags::VALID.bits() + | PteFlags::DEVICE_MEMORY.bits() ); /// The register trait that gives access to only those registers required for receiving a packet. diff --git a/kernel/pte_flags/src/lib.rs b/kernel/pte_flags/src/lib.rs index bd289669e0..71a7a30098 100644 --- a/kernel/pte_flags/src/lib.rs +++ b/kernel/pte_flags/src/lib.rs @@ -278,34 +278,34 @@ impl PteFlags { } #[doc(alias("present"))] - pub fn is_valid(&self) -> bool { + pub const fn is_valid(&self) -> bool { self.contains(Self::VALID) } #[doc(alias("read_only"))] - pub fn is_writable(&self) -> bool { + pub const fn is_writable(&self) -> bool { self.contains(Self::WRITABLE) } #[doc(alias("no_exec"))] - pub fn is_executable(&self) -> bool { + pub const fn is_executable(&self) -> bool { !self.contains(Self::NOT_EXECUTABLE) } #[doc(alias("cache", "cacheable", "non-cacheable"))] - pub fn is_device_memory(&self) -> bool { + pub const fn is_device_memory(&self) -> bool { self.contains(Self::DEVICE_MEMORY) } - pub fn is_dirty(&self) -> bool { + pub const fn is_dirty(&self) -> bool { self.contains(Self::DIRTY) } - pub fn is_accessed(&self) -> bool { + pub const fn is_accessed(&self) -> bool { self.contains(Self::ACCESSED) } - pub fn is_exclusive(&self) -> bool { + pub const fn is_exclusive(&self) -> bool { self.contains(Self::EXCLUSIVE) } } diff --git a/kernel/pte_flags/src/pte_flags_aarch64.rs b/kernel/pte_flags/src/pte_flags_aarch64.rs index 89611e84ae..3314d69be8 100644 --- a/kernel/pte_flags/src/pte_flags_aarch64.rs +++ b/kernel/pte_flags/src/pte_flags_aarch64.rs @@ -329,34 +329,34 @@ impl PteFlagsAarch64 { } #[doc(alias("present"))] - pub fn is_valid(&self) -> bool { + pub const fn is_valid(&self) -> bool { self.contains(Self::VALID) } #[doc(alias("read_only"))] - pub fn is_writable(&self) -> bool { + pub const fn is_writable(&self) -> bool { !self.contains(Self::READ_ONLY) } #[doc(alias("no_exec"))] - pub fn is_executable(&self) -> bool { + pub const fn is_executable(&self) -> bool { !self.contains(Self::NOT_EXECUTABLE) } #[doc(alias("cache", "cacheable", "non-cacheable"))] - pub fn is_device_memory(&self) -> bool { + pub const fn is_device_memory(&self) -> bool { self.contains(Self::DEVICE_MEMORY) } - pub fn is_dirty(&self) -> bool { + pub const fn is_dirty(&self) -> bool { self.contains(Self::DIRTY) } - pub fn is_accessed(&self) -> 
bool { + pub const fn is_accessed(&self) -> bool { self.contains(Self::ACCESSED) } - pub fn is_exclusive(&self) -> bool { + pub const fn is_exclusive(&self) -> bool { self.contains(Self::EXCLUSIVE) } } diff --git a/kernel/pte_flags/src/pte_flags_x86_64.rs b/kernel/pte_flags/src/pte_flags_x86_64.rs index 6ddc42f395..4369cedb72 100644 --- a/kernel/pte_flags/src/pte_flags_x86_64.rs +++ b/kernel/pte_flags/src/pte_flags_x86_64.rs @@ -223,38 +223,38 @@ impl PteFlagsX86_64 { } #[doc(alias("present"))] - pub fn is_valid(&self) -> bool { + pub const fn is_valid(&self) -> bool { self.contains(Self::VALID) } #[doc(alias("read_only"))] - pub fn is_writable(&self) -> bool { + pub const fn is_writable(&self) -> bool { self.contains(Self::WRITABLE) } #[doc(alias("no_exec"))] - pub fn is_executable(&self) -> bool { + pub const fn is_executable(&self) -> bool { !self.contains(Self::NOT_EXECUTABLE) } #[doc(alias("cache", "cacheable", "non-cacheable"))] - pub fn is_device_memory(&self) -> bool { + pub const fn is_device_memory(&self) -> bool { self.contains(Self::DEVICE_MEMORY) } - pub fn is_dirty(&self) -> bool { + pub const fn is_dirty(&self) -> bool { self.contains(Self::DIRTY) } - pub fn is_accessed(&self) -> bool { + pub const fn is_accessed(&self) -> bool { self.contains(Self::ACCESSED) } - pub fn is_huge(&self) -> bool { + pub const fn is_huge(&self) -> bool { self.contains(Self::HUGE_PAGE) } - pub fn is_exclusive(&self) -> bool { + pub const fn is_exclusive(&self) -> bool { self.contains(Self::EXCLUSIVE) } } diff --git a/kernel/stack/src/lib.rs b/kernel/stack/src/lib.rs index d8c321bb2a..3d6782e176 100644 --- a/kernel/stack/src/lib.rs +++ b/kernel/stack/src/lib.rs @@ -13,7 +13,7 @@ extern crate page_allocator; use core::ops::{Deref, DerefMut}; use kernel_config::memory::PAGE_SIZE; use memory_structs::VirtualAddress; -use memory::{EntryFlags, MappedPages, Mapper}; +use memory::{PteFlags, MappedPages, Mapper}; use page_allocator::AllocatedPages; @@ -44,14 +44,10 @@ fn inner_alloc_stack( let (guard_page, stack_pages) = pages.split(start_of_stack_pages).ok()?; // For stack memory, the minimum required flag is WRITABLE. - let flags = EntryFlags::WRITABLE; - // if usermode { flags |= EntryFlags::USER_ACCESSIBLE; } + let flags = PteFlags::new().writable(true); // Map stack pages to physical frames, leave the guard page unmapped. 
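Stepping back to the two `pte_flags` files above, one detail is worth spelling out: the architectures disagree on write-permission polarity. x86_64 defines a `WRITABLE` bit that is set to allow writes, while aarch64 defines a `READ_ONLY` bit that is set to forbid them, which is why the aarch64 `is_writable()` negates `contains(Self::READ_ONLY)`. The shared builder API hides that inversion from callers. A toy model of the difference, using invented standalone types rather than the real `bitflags` structs:

```rust
const X86_WRITABLE: u64 = 1 << 1;  // set => writes allowed
const ARM_READ_ONLY: u64 = 1 << 7; // set => writes forbidden (AP[2])

struct X86Flags(u64);
struct ArmFlags(u64);

impl X86Flags {
    fn writable(self, enable: bool) -> Self {
        Self(if enable { self.0 | X86_WRITABLE } else { self.0 & !X86_WRITABLE })
    }
    fn is_writable(&self) -> bool { self.0 & X86_WRITABLE != 0 }
}

impl ArmFlags {
    fn writable(self, enable: bool) -> Self {
        // Inverted polarity: enabling writes *clears* the bit.
        Self(if enable { self.0 & !ARM_READ_ONLY } else { self.0 | ARM_READ_ONLY })
    }
    fn is_writable(&self) -> bool { self.0 & ARM_READ_ONLY == 0 }
}

fn main() {
    assert!(X86Flags(0).writable(true).is_writable());
    assert!(ArmFlags(0).writable(true).is_writable());
    assert!(!ArmFlags(0).writable(false).is_writable());
}
```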
- let pages = match page_table.map_allocated_pages( - stack_pages, - flags, - ) { + let pages = match page_table.map_allocated_pages(stack_pages, flags) { Ok(pages) => pages, Err(e) => { error!("alloc_stack(): couldn't map pages for the new Stack, error: {}", e); From f1156514ed536c6fe91fff47c3943c96deaebfd0 Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Sat, 3 Dec 2022 15:34:05 -0800 Subject: [PATCH 4/8] Complete the transition from `EntryFlags` to `PteFlags` --- applications/test_filerw/src/lib.rs | 3 +-- kernel/mod_mgmt/src/lib.rs | 2 +- ports/region | 2 +- ports/wasmtime | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/applications/test_filerw/src/lib.rs b/applications/test_filerw/src/lib.rs index aeb7eb6555..effafae296 100644 --- a/applications/test_filerw/src/lib.rs +++ b/applications/test_filerw/src/lib.rs @@ -69,8 +69,7 @@ fn test_filerw() -> Result<(), &'static str> { // we'll allocate the buffer length plus the offset because that's guranteed to be the most bytes we // need (because it entered this conditional statement) let pages = memory::allocate_pages_by_bytes(1).ok_or("could not allocate pages")?; - // the default flag is that the MappedPages are not writable - let mapped_pages = kernel_mmi_ref.lock().page_table.map_allocated_pages(pages, Default::default())?; + let mapped_pages = kernel_mmi_ref.lock().page_table.map_allocated_pages(pages, memory::PteFlags::new())?; let non_writable_file = MemFile::from_mapped_pages(mapped_pages, "non-writable testfile".to_string(), 1, &parent)?; match non_writable_file.lock().write_at(&mut string_slice_as_bytes, 0) { diff --git a/kernel/mod_mgmt/src/lib.rs b/kernel/mod_mgmt/src/lib.rs index 5180dfce07..7ef3826f75 100644 --- a/kernel/mod_mgmt/src/lib.rs +++ b/kernel/mod_mgmt/src/lib.rs @@ -194,7 +194,7 @@ fn parse_bootloader_modules_into_files( let bytes = entry.file(); let size = bytes.len(); let mut mp = { - let flags = EntryFlags::WRITABLE | EntryFlags::NO_EXECUTE | EntryFlags::PRESENT; + let flags = PteFlags::new().valid(true).writable(true); let allocated_pages = allocate_pages_by_bytes(size).ok_or("couldn't allocate pages")?; kernel_mmi.page_table.map_allocated_pages(allocated_pages, flags)? }; diff --git a/ports/region b/ports/region index fc502486d5..635bcf863d 160000 --- a/ports/region +++ b/ports/region @@ -1 +1 @@ -Subproject commit fc502486d5811e3bca8dd7629b662ba62c080b82 +Subproject commit 635bcf863d15444b1962ed316a30a4caea567ae4 diff --git a/ports/wasmtime b/ports/wasmtime index c1dbff26bd..87a73c9af8 160000 --- a/ports/wasmtime +++ b/ports/wasmtime @@ -1 +1 @@ -Subproject commit c1dbff26bd866be432ce3b80d7880da10e33ecc2 +Subproject commit 87a73c9af85ae92603af7c2a096ad93528f9f0ae From 4b56ce8e81d50be8f98bb26b3c8253995b4c1cc8 Mon Sep 17 00:00:00 2001 From: Kevin Boos Date: Sun, 4 Dec 2022 00:15:16 -0800 Subject: [PATCH 5/8] clean up, fix flags --- kernel/crate_metadata/src/lib.rs | 6 ++---- kernel/memory/src/lib.rs | 2 +- kernel/nic_queues/src/lib.rs | 3 ++- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/kernel/crate_metadata/src/lib.rs b/kernel/crate_metadata/src/lib.rs index 364a1d8a6a..9da279eed6 100644 --- a/kernel/crate_metadata/src/lib.rs +++ b/kernel/crate_metadata/src/lib.rs @@ -95,14 +95,12 @@ pub const TEXT_SECTION_FLAGS: PteFlags = PteFlags::from_bits_truncate( ); /// `.rodata` sections are read-only and non-executable. 
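The "fix flags" part of this commit deserves a remark on operator precedence: in Rust, `&` binds more tightly than `|`, so the patch-3 form of `RODATA_SECTION_FLAGS`, shaped like `a | b & !c`, parsed as `a | (b & !c)`. That grouping happened to be harmless here, since `VALID` and `WRITABLE` occupy different bits and thus `VALID & !WRITABLE == VALID`, but it did not express the intent; the parentheses added below force the intended reading. A two-line demonstration of the pitfall:

```rust
fn main() {
    let (a, b, c) = (0b0001_u8, 0b0010_u8, 0b0001_u8);
    assert_eq!(a | b & !c, a | (b & !c));  // how Rust actually parses it
    assert_ne!(a | b & !c, (a | b) & !c);  // what a reader might assume
}
```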
 pub const RODATA_SECTION_FLAGS: PteFlags = PteFlags::from_bits_truncate(
-    PteFlags::new().bits()
-    | PteFlags::VALID.bits()
+    (PteFlags::new().bits() | PteFlags::VALID.bits())
     & !PteFlags::WRITABLE.bits()
 );
 
 /// `.data` and `.bss` sections are read-write and non-executable.
 pub const DATA_BSS_SECTION_FLAGS: PteFlags = PteFlags::from_bits_truncate(
-    PteFlags::new().bits()
-    | PteFlags::VALID.bits()
+    (PteFlags::new().bits() | PteFlags::VALID.bits())
     | PteFlags::WRITABLE.bits()
 );
diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs
index a3b37fefc7..270409976c 100644
--- a/kernel/memory/src/lib.rs
+++ b/kernel/memory/src/lib.rs
@@ -7,7 +7,7 @@
 //! ## Acknowledgments
 //! Some of the internal page table management code was based on
 //! Philipp Oppermann's [blog_os], but has since changed significantly.
-//! 
+//!
 //! [blog_os]: https://github.com/phil-opp/blog_os
 
 #![no_std]
diff --git a/kernel/nic_queues/src/lib.rs b/kernel/nic_queues/src/lib.rs
index fc91848b64..e26b5a5847 100644
--- a/kernel/nic_queues/src/lib.rs
+++ b/kernel/nic_queues/src/lib.rs
@@ -22,11 +22,12 @@ use memory::{create_contiguous_mapping, PteFlags, BorrowedSliceMappedPages, Mutable};
 use intel_ethernet::descriptors::{RxDescriptor, TxDescriptor};
 use nic_buffers::{ReceiveBuffer, ReceivedFrame, TransmitBuffer};
 
-/// The mapping flags used for pages that the NIC will map.
+/// The mapping flags used to map NIC device memory, e.g., MMIO registers.
 pub const NIC_MAPPING_FLAGS: PteFlags = PteFlags::from_bits_truncate(
     PteFlags::new().bits()
     | PteFlags::VALID.bits()
     | PteFlags::DEVICE_MEMORY.bits()
+    | PteFlags::WRITABLE.bits()
 );

From de5068c8508f49501b04ccc5a4950df869728d89 Mon Sep 17 00:00:00 2001
From: Kevin Boos
Date: Sun, 4 Dec 2022 00:20:34 -0800
Subject: [PATCH 6/8] Clean up `memory` re-exports and remove a stale
 Cargo.lock

---
 kernel/memory/src/lib.rs             | 8 +++++---
 libs/owned_borrowed_trait/Cargo.lock | 7 -------
 2 files changed, 5 insertions(+), 10 deletions(-)
 delete mode 100644 libs/owned_borrowed_trait/Cargo.lock

diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs
index 270409976c..1625d0dff1 100644
--- a/kernel/memory/src/lib.rs
+++ b/kernel/memory/src/lib.rs
@@ -29,12 +29,14 @@ pub use self::paging::{
 };
 
 pub use memory_structs::{Frame, Page, FrameRange, PageRange, VirtualAddress, PhysicalAddress};
-pub use page_allocator::{AllocatedPages, allocate_pages, allocate_pages_at,
-    allocate_pages_by_bytes, allocate_pages_by_bytes_at};
+pub use page_allocator::{
+    AllocatedPages, allocate_pages, allocate_pages_at,
+    allocate_pages_by_bytes, allocate_pages_by_bytes_at,
+};
 
 pub use frame_allocator::{
     AllocatedFrames, MemoryRegionType, PhysicalMemoryRegion,
-    allocate_frames_by_bytes_at, allocate_frames_by_bytes, allocate_frames_at,
+    allocate_frames, allocate_frames_at, allocate_frames_by_bytes_at, allocate_frames_by_bytes,
 };
 
 #[cfg(target_arch = "x86_64")]
diff --git a/libs/owned_borrowed_trait/Cargo.lock b/libs/owned_borrowed_trait/Cargo.lock
deleted file mode 100644
index a492fd47a5..0000000000
--- a/libs/owned_borrowed_trait/Cargo.lock
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "owned_borrowed_trait"
-version = "0.1.0"

From 338d9708b011eba3c4fe18f1111290bd34b7a8ce Mon Sep 17 00:00:00 2001
From: Kevin Boos
Date: Sun, 4 Dec 2022 00:30:52 -0800
Subject: [PATCH 7/8] Use consistent `NIC_MAPPING_FLAGS` everywhere

---
 kernel/nic_buffers/src/lib.rs |  8 ++++++++
 kernel/nic_queues/src/lib.rs  | 11 ++---------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/kernel/nic_buffers/src/lib.rs b/kernel/nic_buffers/src/lib.rs
index 8b9121bebe..ed00151ac4 100644
--- a/kernel/nic_buffers/src/lib.rs
+++ b/kernel/nic_buffers/src/lib.rs
@@ -11,6 +11,14 @@ use core::ops::{Deref, DerefMut};
 use alloc::vec::Vec;
 use memory::{PhysicalAddress, MappedPages, PteFlags, create_contiguous_mapping};
 
+/// The mapping flags used to map NIC device memory,
+/// e.g., MMIO registers, buffers, queues, etc.
+pub const NIC_MAPPING_FLAGS: PteFlags = PteFlags::from_bits_truncate(
+    PteFlags::new().bits()
+    | PteFlags::VALID.bits()
+    | PteFlags::WRITABLE.bits()
+    | PteFlags::DEVICE_MEMORY.bits()
+);
 
 /// A buffer that stores a packet to be transmitted through the NIC
 /// and is guaranteed to be contiguous in physical memory.
diff --git a/kernel/nic_queues/src/lib.rs b/kernel/nic_queues/src/lib.rs
index e26b5a5847..e115652c07 100644
--- a/kernel/nic_queues/src/lib.rs
+++ b/kernel/nic_queues/src/lib.rs
@@ -18,17 +18,10 @@ use alloc::{
     vec::Vec,
     collections::VecDeque
 };
-use memory::{create_contiguous_mapping, PteFlags, BorrowedSliceMappedPages, Mutable};
+use memory::{create_contiguous_mapping, BorrowedSliceMappedPages, Mutable};
 use intel_ethernet::descriptors::{RxDescriptor, TxDescriptor};
 use nic_buffers::{ReceiveBuffer, ReceivedFrame, TransmitBuffer};
-
-/// The mapping flags used to map NIC device memory, e.g., MMIO registers.
-pub const NIC_MAPPING_FLAGS: PteFlags = PteFlags::from_bits_truncate(
-    PteFlags::new().bits()
-    | PteFlags::VALID.bits()
-    | PteFlags::DEVICE_MEMORY.bits()
-    | PteFlags::WRITABLE.bits()
-);
+pub use nic_buffers::NIC_MAPPING_FLAGS;
 
 /// The register trait that gives access to only those registers required for receiving a packet.
 /// The Rx queue control registers can only be accessed by the physical NIC.

From e66a0d9ccbc9139e29af1ef9b1bfeecef4d0f69b Mon Sep 17 00:00:00 2001
From: Kevin Boos
Date: Sun, 4 Dec 2022 22:25:15 -0800
Subject: [PATCH 8/8] Allow `create_[contiguous_]mapping()` functions to
 accept either `PteFlags` or `PteFlagsArch`, like all other mapping functions

---
 kernel/framebuffer/src/lib.rs |  2 +-
 kernel/memory/src/lib.rs      | 10 ++++++++--
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/kernel/framebuffer/src/lib.rs b/kernel/framebuffer/src/lib.rs
index 55123a1d21..18abfd935c 100644
--- a/kernel/framebuffer/src/lib.rs
+++ b/kernel/framebuffer/src/lib.rs
@@ -77,7 +77,7 @@ impl<P: Pixel> Framebuffer<P> {
         // get a reference to the kernel's memory mapping information
         let kernel_mmi_ref = memory::get_kernel_mmi_ref().ok_or("KERNEL_MMI was not yet initialized!")?;
-        let vesa_display_flags: PteFlags = PteFlags::new()
+        let vesa_display_flags = PteFlags::new()
             .valid(true)
             .writable(true)
             .device_memory(true); // TODO: use PAT write-combining instead of disabling caching
diff --git a/kernel/memory/src/lib.rs b/kernel/memory/src/lib.rs
index 1625d0dff1..d2eaf640e2 100644
--- a/kernel/memory/src/lib.rs
+++ b/kernel/memory/src/lib.rs
@@ -90,7 +90,10 @@ pub struct MemoryManagementInfo {
 /// # Locking / Deadlock
 /// Currently, this function acquires the lock on the frame allocator and the kernel's `MemoryManagementInfo` instance.
 /// Thus, the caller should ensure that the locks on those two variables are not held when invoking this function.
-pub fn create_contiguous_mapping(size_in_bytes: usize, flags: PteFlags) -> Result<(MappedPages, PhysicalAddress), &'static str> {
+pub fn create_contiguous_mapping<F: Into<PteFlagsArch>>(
+    size_in_bytes: usize,
+    flags: F,
+) -> Result<(MappedPages, PhysicalAddress), &'static str> {
     let kernel_mmi_ref = get_kernel_mmi_ref().ok_or("create_contiguous_mapping(): KERNEL_MMI was not yet initialized!")?;
     let allocated_pages = allocate_pages_by_bytes(size_in_bytes).ok_or("memory::create_contiguous_mapping(): couldn't allocate contiguous pages!")?;
     let allocated_frames = allocate_frames_by_bytes(size_in_bytes).ok_or("memory::create_contiguous_mapping(): couldn't allocate contiguous frames!")?;
@@ -108,7 +111,10 @@
 /// # Locking / Deadlock
 /// Currently, this function acquires the lock on the kernel's `MemoryManagementInfo` instance.
 /// Thus, the caller should ensure that lock is not held when invoking this function.
-pub fn create_mapping(size_in_bytes: usize, flags: PteFlags) -> Result<MappedPages, &'static str> {
+pub fn create_mapping<F: Into<PteFlagsArch>>(
+    size_in_bytes: usize,
+    flags: F,
+) -> Result<MappedPages, &'static str> {
     let kernel_mmi_ref = get_kernel_mmi_ref().ok_or("create_mapping(): KERNEL_MMI was not yet initialized!")?;
     let allocated_pages = allocate_pages_by_bytes(size_in_bytes).ok_or("memory::create_mapping(): couldn't allocate pages!")?;
     kernel_mmi_ref.lock().page_table.map_allocated_pages(allocated_pages, flags)
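
At call sites, the old pattern of OR-ing `EntryFlags` bits together becomes a chain of
`PteFlags` builder calls, as the `stack`, `mod_mgmt`, and `test_filerw` hunks show.
Below is a minimal sketch of a caller after this series, assuming the `memory` crate
re-exports used throughout these patches (`PteFlags`, `MappedPages`, `create_mapping`);
the helper name `alloc_scratch_buffer` is hypothetical, not part of the series:

    use memory::{create_mapping, MappedPages, PteFlags};

    /// Allocates a writable, non-executable scratch buffer of at least `size_in_bytes`.
    fn alloc_scratch_buffer(size_in_bytes: usize) -> Result<MappedPages, &'static str> {
        // `PteFlags::new()` starts from conservative defaults (non-writable,
        // non-executable); each builder call enables exactly one property,
        // replacing the old `EntryFlags::PRESENT | EntryFlags::WRITABLE` style.
        let flags = PteFlags::new()
            .valid(true)
            .writable(true);
        create_mapping(size_in_bytes, flags)
    }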
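
Constants such as `TEXT_SECTION_FLAGS` and `NIC_MAPPING_FLAGS` are instead assembled
from raw bits via `from_bits_truncate()`, since `new()`, `bits()`, and
`from_bits_truncate()` are usable in `const` items. The same pattern for a writable,
non-cacheable device-memory region (the constant name below is illustrative only):

    use pte_flags::PteFlags;

    /// Flags for a writable, non-cacheable device-memory region,
    /// evaluated entirely at compile time.
    pub const MMIO_FLAGS: PteFlags = PteFlags::from_bits_truncate(
        PteFlags::new().bits()
        | PteFlags::VALID.bits()
        | PteFlags::WRITABLE.bits()
        | PteFlags::DEVICE_MEMORY.bits()
    );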
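
The `const fn` getter changes at the top of this range make such constants checkable
at compile time. A sketch of that idea, assuming the cross-platform `PteFlags` getters
were made `const` alongside the arch-specific ones shown in these hunks:

    use nic_buffers::NIC_MAPPING_FLAGS;

    // Compile-time sanity check: NIC mappings must be writable device memory.
    // If a later edit drops one of these bits, the build fails here rather
    // than misbehaving at runtime.
    const _: () = {
        assert!(NIC_MAPPING_FLAGS.is_writable());
        assert!(NIC_MAPPING_FLAGS.is_device_memory());
    };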
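
Finally, patch 8/8 gives `create_mapping()` and `create_contiguous_mapping()` the same
`F: Into<PteFlagsArch>` bound as `map_allocated_pages()`, so callers may pass either the
cross-platform `PteFlags` or the arch-specific `PteFlagsArch`. A sketch of what that
enables (the helper name is hypothetical, and it assumes `memory` re-exports `PteFlags`
as shown above):

    use memory::{create_contiguous_mapping, MappedPages, PhysicalAddress, PteFlags};

    /// Allocates a physically contiguous, device-visible buffer,
    /// e.g., for a NIC descriptor ring. The conversion into
    /// `PteFlagsArch` happens once, inside `create_contiguous_mapping()`.
    fn alloc_descriptor_ring(len_in_bytes: usize) -> Result<(MappedPages, PhysicalAddress), &'static str> {
        let flags = PteFlags::new()
            .valid(true)
            .writable(true)
            .device_memory(true); // uncached, like NIC_MAPPING_FLAGS
        create_contiguous_mapping(len_in_bytes, flags)
    }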