diff --git a/aarch64/Cargo.lock b/aarch64/Cargo.lock new file mode 100644 index 0000000000..408f945343 --- /dev/null +++ b/aarch64/Cargo.lock @@ -0,0 +1,548 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "bare-metal" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5deb64efa5bd81e31fcd1938615a6d98c82eafcbcd787162b6f63b91d6bac5b3" +dependencies = [ + "rustc_version 0.2.3", +] + +[[package]] +name = "bit_field" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff91a64014e1bc53bf643920f2c9ab5f0980d92a0948295f3ee550e9266849ad" + +[[package]] +name = "bit_field" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb6dd1c2376d2e096796e234a70e17e94cc2d5d54ff8ce42b28cef1d0d359a4" + +[[package]] +name = "bitfield" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + +[[package]] +name = "cortex-a" +version = "7.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdecfbb28672ad3664e71ae05a398a52df430d86d660691501b28968cc4467e6" +dependencies = [ + "tock-registers", +] + +[[package]] +name = "cortex-m" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70858629a458fdfd39f9675c4dc309411f2a3f83bede76988d81bf1a0ecee9e0" +dependencies = [ + "bare-metal", + "bitfield", + "embedded-hal", + "volatile-register", +] + +[[package]] +name = "derive_more" +version = "0.99.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version 0.4.0", + "syn", +] + +[[package]] +name = "embedded-hal" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35949884794ad573cf46071e41c9b60efb0cb311e3ca01f7af807af1debc66ff" +dependencies = [ + "nb 0.1.3", + "void", +] + +[[package]] +name = "frame_allocator" +version = "0.1.0" +dependencies = [ + "intrusive-collections", + "kernel_config", + "log", + "memory_structs", + "spin 0.9.4", + "static_assertions", +] + +[[package]] +name = "intrusive-collections" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "bfe531a7789d7120f3e17d4f3f2cd95f54418ba7354f60b7b622b6644a07888a" +dependencies = [ + "memoffset", +] + +[[package]] +name = "irq_safety" +version = "0.1.1" +source = "git+https://github.com/theseus-os/irq_safety#4908fbb38aca1513572e0b4c7a98884390ef361a" +dependencies = [ + "owning_ref", + "spin 0.9.4", + "stable_deref_trait", +] + +[[package]] +name = "kernel_config" +version = "0.1.0" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] + +[[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "logger" +version = "0.1.0" +dependencies = [ + "log", + "pl011_qemu", +] + +[[package]] +name = "memoffset" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +dependencies = [ + "autocfg", +] + +[[package]] +name = "memory" +version = "0.1.0" +dependencies = [ + "bit_field 0.7.0", + "bitflags", + "frame_allocator", + "irq_safety", + "kernel_config", + "lazy_static", + "log", + "memory_aarch64", + "memory_structs", + "multiboot2", + "no_drop", + "owned_borrowed_trait", + "page_allocator", + "page_table_entry", + "pte_flags", + "spin 0.9.4", + "xmas-elf", + "zerocopy", +] + +[[package]] +name = "memory_aarch64" +version = "0.1.0" +dependencies = [ + "cortex-a", + "memory_structs", + "tock-registers", +] + +[[package]] +name = "memory_structs" +version = "0.1.0" +dependencies = [ + "bit_field 0.7.0", + "derive_more", + "kernel_config", + "paste", + "zerocopy", +] + +[[package]] +name = "multiboot2" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6170b6f12ea75d8d0f5621e3ed780b041a666c4a5b904c77261fe343d0e798d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "nano_core" +version = "0.1.0" +dependencies = [ + "frame_allocator", + "kernel_config", + "log", + "logger", + "memory", + "memory_structs", + "page_allocator", + "pte_flags", + "uefi", + "uefi-services", +] + +[[package]] +name = "nb" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f" +dependencies = [ + "nb 1.0.0", +] + +[[package]] +name = "nb" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "546c37ac5d9e56f55e73b677106873d9d9f5190605e41a856503623648488cae" + +[[package]] +name = "no_drop" +version = "0.1.0" + +[[package]] +name = "owned_borrowed_trait" +version = "0.1.0" + +[[package]] +name = "owning_ref" +version = "0.4.1" +source = "git+https://github.com/theseus-os/owning-ref-rs.git#0d2dcdcffd75b80157d0e72fb82593a8696a9c49" +dependencies = [ + "spin 0.9.4", + "stable_deref_trait", +] + +[[package]] +name = "page_allocator" +version = "0.1.0" +dependencies = [ + "intrusive-collections", + "kernel_config", + "log", + "memory_structs", + "spin 0.9.4", + 
"static_assertions", +] + +[[package]] +name = "page_table_entry" +version = "0.1.0" +dependencies = [ + "bit_field 0.7.0", + "frame_allocator", + "kernel_config", + "memory_structs", + "pte_flags", + "zerocopy", +] + +[[package]] +name = "paste" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" + +[[package]] +name = "pl011_qemu" +version = "0.2.0" +source = "git+https://github.com/theseus-os/pl011/?branch=aarch64-qemu-virt-test#3b3c3f6260cfaa5ca97ab6989bdb141c1276257e" +dependencies = [ + "cortex-m", + "embedded-hal", + "nb 1.0.0", + "volatile-register", +] + +[[package]] +name = "proc-macro2" +version = "1.0.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "pte_flags" +version = "0.1.0" +dependencies = [ + "bitflags", + "cfg-if", + "static_assertions", +] + +[[package]] +name = "quote" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver 0.9.0", +] + +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver 1.0.14", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" +dependencies = [ + "lock_api", +] + +[[package]] +name = "stable_deref_trait" +version = "1.1.1" +source = "git+https://github.com/theseus-os/stable_deref_trait.git?branch=spin#e006c79280042e27c4f16c7d29633eb2273752ee" +dependencies = [ + "spin 0.9.4", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "syn" +version = "1.0.105" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tock-registers" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ee8fba06c1f4d0b396ef61a54530bb6b28f0dc61c38bc8bc5a5a48161e6282e" + +[[package]] +name = "ucs2" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bad643914094137d475641b6bab89462505316ec2ce70907ad20102d28a79ab8" +dependencies = [ + "bit_field 0.10.1", +] + +[[package]] +name = "uefi" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07b87700863d65dd4841556be3374d8d4f9f8dbb577ad93a39859e70b3b91f35" +dependencies = [ + "bitflags", + "log", + "ucs2", + "uefi-macros", +] + +[[package]] +name = "uefi-macros" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "275f054a1d9fd7e43a2ce91cc24298a87b281117dea8afc120ae95faa0e96b94" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "uefi-services" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74f6e28d165193fb5da5230faec576f0be75c71ce6d171556a2775b3673094b4" +dependencies = [ + "cfg-if", + "log", + "uefi", +] + +[[package]] +name = "unicode-ident" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" + +[[package]] +name = "vcell" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77439c1b53d2303b20d9459b1ade71a83c716e3f9c34f3228c00e6f185d6c002" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "volatile-register" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ee8f19f9d74293faf70901bc20ad067dc1ad390d2cbf1e3f75f721ffee908b6" +dependencies = [ + "vcell", +] + +[[package]] +name = "xmas-elf" +version = "0.6.2" +source = "git+https://github.com/theseus-os/xmas-elf.git#635d55f6886ae3fe0ec8a78e0bcc1238224c903d" +dependencies = [ + "zero", +] + +[[package]] +name = "zero" +version = "0.1.3" +source = "git+https://github.com/theseus-os/zero.git#9fc7ff523138a21f40359b706d2d6bf91deafc62" + +[[package]] +name = "zerocopy" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e59ec1d2457bd6c0dd89b50e7d9d6b0b647809bf3f0a59ac85557046950b7b2" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6505e6815af7de1746a08f69c69606bb45695a17149517680f3b2149713b19a3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/aarch64/kernel/memory/Cargo.toml b/aarch64/kernel/memory/Cargo.toml new file mode 100644 index 0000000000..ac614bf25c --- /dev/null +++ b/aarch64/kernel/memory/Cargo.toml @@ -0,0 +1,62 @@ +[package] +authors = ["Kevin Boos "] +name = "memory" +description = "The memory management subsystem" +version = "0.1.0" +edition = "2021" + +[dependencies] +spin = "0.9.4" +bitflags = "1.1.0" +multiboot2 = 
"0.14.0" +xmas-elf = { version = "0.6.2", git = "https://github.com/theseus-os/xmas-elf.git" } +bit_field = "0.7.0" +zerocopy = "0.5.0" + +# [target.'cfg(target_arch = "x86_64")'.dependencies] +# x86_64 = "0.14.8" +# memory_x86_64 = { path = "../memory_x86_64" } + +[target.'cfg(target_arch = "aarch64")'.dependencies] +memory_aarch64 = { path = "../memory_aarch64" } + +[dependencies.log] +version = "0.4.8" + +[dependencies.lazy_static] +features = ["spin_no_std"] +version = "1.4.0" + +[dependencies.irq_safety] +git = "https://github.com/theseus-os/irq_safety" + +# [dependencies.atomic_linked_list] +# path = "../../libs/atomic_linked_list" + +[dependencies.kernel_config] +path = "../../../kernel/kernel_config" + +[dependencies.memory_structs] +path = "../../../kernel/memory_structs" + +[dependencies.page_table_entry] +path = "../../../kernel/page_table_entry" + +[dependencies.pte_flags] +path = "../../../kernel/pte_flags" + +[dependencies.page_allocator] +path = "../../../kernel/page_allocator" + +[dependencies.frame_allocator] +path = "../../../kernel/frame_allocator" + +[dependencies.no_drop] +path = "../../../kernel/no_drop" + +[dependencies.owned_borrowed_trait] +path = "../../../libs/owned_borrowed_trait" + + +[lib] +crate-type = ["rlib"] diff --git a/aarch64/kernel/memory/src/lib.rs b/aarch64/kernel/memory/src/lib.rs new file mode 100644 index 0000000000..75fb9a2bdd --- /dev/null +++ b/aarch64/kernel/memory/src/lib.rs @@ -0,0 +1,317 @@ +//! This crate implements the main memory management subsystem for Theseus. +//! +//! The primary type of interest is [`MappedPages`], which offers a robust +//! interface that unifies the usage of arbitrary memory regions +//! with that of Rust's safe type system and lifetimes. +//! +//! ## Acknowledgments +//! Some of the internal page table management code was based on +//! Philipp Oppermann's [blog_os], but has since changed significantly. +//! +//! 
[blog_os]: https://github.com/phil-opp/blog_os + +#![no_std] +#![feature(ptr_internals)] +#![feature(unboxed_closures)] +#![feature(result_option_inspect)] + +extern crate alloc; + +mod paging; +pub use self::paging::{ + PageTable, Mapper, Mutability, Mutable, Immutable, + MappedPages, BorrowedMappedPages, BorrowedSliceMappedPages, +}; + +pub use memory_structs::{Frame, Page, FrameRange, PageRange, VirtualAddress, PhysicalAddress}; +pub use page_allocator::{ + AllocatedPages, allocate_pages, allocate_pages_at, + allocate_pages_by_bytes, allocate_pages_by_bytes_at, +}; + +pub use frame_allocator::{ + AllocatedFrames, MemoryRegionType, PhysicalMemoryRegion, + allocate_frames, allocate_frames_at, allocate_frames_by_bytes_at, allocate_frames_by_bytes, +}; + +#[cfg(target_arch = "x86_64")] +use { + memory_x86_64::{ + BootInformation, get_kernel_address, get_boot_info_mem_area, find_section_memory_bounds, + get_vga_mem_addr, get_modules_address, tlb_flush_virt_addr, tlb_flush_all, get_p4, + set_as_active_page_table_root + }, + kernel_config::memory::KERNEL_OFFSET, +}; + +#[cfg(target_arch = "aarch64")] +use memory_aarch64::{ + tlb_flush_virt_addr, tlb_flush_all, get_p4, set_as_active_page_table_root, + disable_mmu, enable_mmu, configure_translation_registers +}; + +pub use pte_flags::*; + +use log::debug; +use spin::Once; +use irq_safety::MutexIrqSafe; +use alloc::vec::Vec; +use alloc::sync::Arc; +use no_drop::NoDrop; +pub use kernel_config::memory::PAGE_SIZE; + +/// The memory management info and address space of the kernel +static KERNEL_MMI: Once = Once::new(); + +/// A shareable reference to a `MemoryManagementInfo` struct wrapper in a lock. +pub type MmiRef = Arc>; + +/// Returns a reference to the kernel's `MemoryManagementInfo`, if initialized. +/// If not, it returns `None`. +pub fn get_kernel_mmi_ref() -> Option<&'static MmiRef> { + KERNEL_MMI.get() +} + + +/// This holds all the information for a `Task`'s memory mappings and address space +/// (this is basically the equivalent of Linux's mm_struct) +#[derive(Debug)] +pub struct MemoryManagementInfo { + /// the PageTable that should be switched to when this Task is switched to. + pub page_table: PageTable, + + /// a list of additional virtual-mapped Pages that have the same lifetime as this MMI + /// and are thus owned by this MMI, but is not all-inclusive (e.g., Stacks are excluded). + pub extra_mapped_pages: Vec, +} + + +/// A convenience function that creates a new memory mapping by allocating frames that are contiguous in physical memory. +/// If contiguous frames are not required, then see [`create_mapping()`](fn.create_mapping.html). +/// Returns a tuple containing the new `MappedPages` and the starting PhysicalAddress of the first frame, +/// which is a convenient way to get the physical address without walking the page tables. +/// +/// # Locking / Deadlock +/// Currently, this function acquires the lock on the frame allocator and the kernel's `MemoryManagementInfo` instance. +/// Thus, the caller should ensure that the locks on those two variables are not held when invoking this function. 
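+///
+/// # Example (illustrative sketch)
+/// The flag constructor below is an assumption; any flags convertible into `PteFlagsArch` work:
+/// ```ignore
+/// // Allocate and map one page of physically-contiguous memory, e.g., for a DMA buffer.
+/// let (mapped_pages, phys_addr) =
+///     memory::create_contiguous_mapping(PAGE_SIZE, PteFlags::new().writable(true))?;
+/// // `phys_addr` can be handed to a device; `mapped_pages` keeps the mapping alive.
+/// ```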
+pub fn create_contiguous_mapping>( + size_in_bytes: usize, + flags: F, +) -> Result<(MappedPages, PhysicalAddress), &'static str> { + let kernel_mmi_ref = get_kernel_mmi_ref().ok_or("create_contiguous_mapping(): KERNEL_MMI was not yet initialized!")?; + let allocated_pages = allocate_pages_by_bytes(size_in_bytes).ok_or("memory::create_contiguous_mapping(): couldn't allocate contiguous pages!")?; + let allocated_frames = allocate_frames_by_bytes(size_in_bytes).ok_or("memory::create_contiguous_mapping(): couldn't allocate contiguous frames!")?; + let starting_phys_addr = allocated_frames.start_address(); + let mp = kernel_mmi_ref.lock().page_table.map_allocated_pages_to(allocated_pages, allocated_frames, flags)?; + Ok((mp, starting_phys_addr)) +} + + +/// A convenience function that creates a new memory mapping. The pages allocated are contiguous in memory but there's +/// no guarantee that the frames they are mapped to are also contiguous in memory. If contiguous frames are required +/// then see [`create_contiguous_mapping()`](fn.create_contiguous_mapping.html). +/// Returns the new `MappedPages.` +/// +/// # Locking / Deadlock +/// Currently, this function acquires the lock on the kernel's `MemoryManagementInfo` instance. +/// Thus, the caller should ensure that lock is not held when invoking this function. +pub fn create_mapping>( + size_in_bytes: usize, + flags: F, +) -> Result { + let kernel_mmi_ref = get_kernel_mmi_ref().ok_or("create_contiguous_mapping(): KERNEL_MMI was not yet initialized!")?; + let allocated_pages = allocate_pages_by_bytes(size_in_bytes).ok_or("memory::create_mapping(): couldn't allocate pages!")?; + kernel_mmi_ref.lock().page_table.map_allocated_pages(allocated_pages, flags) +} + + +static BROADCAST_TLB_SHOOTDOWN_FUNC: Once = Once::new(); + +/// Set the function callback that will be invoked every time a TLB shootdown is necessary, +/// i.e., during page table remapping and unmapping operations. +pub fn set_broadcast_tlb_shootdown_cb(func: fn(PageRange)) { + BROADCAST_TLB_SHOOTDOWN_FUNC.call_once(|| func); +} + + +#[cfg(target_arch = "x86_64")] +/// Initializes the virtual memory management system. +/// Consumes the given BootInformation, because after the memory system is initialized, +/// the original BootInformation will be unmapped and inaccessible. +/// +/// Returns the following tuple, if successful: +/// 1. the kernel's new `PageTable`, which is now currently active, +/// 2. the `MappedPages` of the kernel's text section, +/// 3. the `MappedPages` of the kernel's rodata section, +/// 4. the `MappedPages` of the kernel's data section, +/// 5. a tuple of the stack's underlying guard page (an `AllocatedPages` instance) and the actual `MappedPages` backing it, +/// 6. the `MappedPages` holding the bootloader info, +/// 7. the kernel's list of *other* higher-half MappedPages that needs to be converted to a vector after heap initialization, and which should be kept forever, +/// 8. the kernel's list of identity-mapped MappedPages that needs to be converted to a vector after heap initialization, and which should be dropped before starting the first userspace program. +pub fn init( + boot_info: &BootInformation +) -> Result<( + PageTable, + NoDrop, + NoDrop, + NoDrop, + (AllocatedPages, NoDrop), + MappedPages, + [Option>; 32], + [Option>; 32], +), &'static str> { + // Get the start and end addresses of the kernel, boot info, boot modules, etc. 
+ let (kernel_phys_start, kernel_phys_end, kernel_virt_end) = get_kernel_address(&boot_info)?; + let (boot_info_paddr_start, boot_info_paddr_end) = get_boot_info_mem_area(&boot_info)?; + let (modules_start_paddr, modules_end_paddr) = get_modules_address(&boot_info); + debug!("bootloader info memory: p{:#X} to p{:#X}, bootloader modules: p{:#X} to p{:#X}", + boot_info_paddr_start, boot_info_paddr_end, modules_start_paddr, modules_end_paddr, + ); + debug!("kernel_phys_start: p{:#X}, kernel_phys_end: p{:#X} kernel_virt_end = v{:#x}", + kernel_phys_start, kernel_phys_end, kernel_virt_end + ); + + // In addition to the information about the hardware's physical memory map provided by the bootloader, + // Theseus chooses to reserve the following regions of physical memory for specific use. + let low_memory_frames = FrameRange::from_phys_addr(PhysicalAddress::zero(), 0x10_0000); // suggested by most OS developers + let kernel_frames = FrameRange::from_phys_addr(kernel_phys_start, kernel_phys_end.value() - kernel_phys_start.value()); + let boot_modules_frames = FrameRange::from_phys_addr(modules_start_paddr, modules_end_paddr.value() - modules_start_paddr.value()); + let boot_info_frames = FrameRange::from_phys_addr(boot_info_paddr_start, boot_info_paddr_end.value() - boot_info_paddr_start.value()); + + // Add the VGA display's memory region to the list of reserved physical memory areas. + // Currently this is covered by the first 1MiB region, but it's okay to duplicate it here. + let (vga_start_paddr, vga_size, _vga_flags) = memory_x86_64::get_vga_mem_addr()?; + let vga_display_frames = FrameRange::from_phys_addr(vga_start_paddr, vga_size); + + // Now set up the list of free regions and reserved regions so we can initialize the frame allocator. + let mut free_regions: [Option; 32] = Default::default(); + let mut free_index = 0; + let mut reserved_regions: [Option; 32] = Default::default(); + let mut reserved_index = 0; + + reserved_regions[reserved_index] = Some(PhysicalMemoryRegion::new(low_memory_frames, MemoryRegionType::Reserved)); + reserved_index += 1; + reserved_regions[reserved_index] = Some(PhysicalMemoryRegion::new(kernel_frames, MemoryRegionType::Reserved)); + reserved_index += 1; + reserved_regions[reserved_index] = Some(PhysicalMemoryRegion::new(boot_modules_frames, MemoryRegionType::Reserved)); + reserved_index += 1; + reserved_regions[reserved_index] = Some(PhysicalMemoryRegion::new(boot_info_frames, MemoryRegionType::Reserved)); + reserved_index += 1; + reserved_regions[reserved_index] = Some(PhysicalMemoryRegion::new(vga_display_frames, MemoryRegionType::Reserved)); + reserved_index += 1; + + for area in boot_info.memory_map_tag() + .ok_or("Multiboot2 boot information has no physical memory map information")? 
+ .all_memory_areas() + { + let frames = FrameRange::from_phys_addr(PhysicalAddress::new_canonical(area.start_address() as usize), area.size() as usize); + if area.typ() == multiboot2::MemoryAreaType::Available { + free_regions[free_index] = Some(PhysicalMemoryRegion::new(frames, MemoryRegionType::Free)); + free_index += 1; + } else { + reserved_regions[reserved_index] = Some(PhysicalMemoryRegion::new(frames, MemoryRegionType::Reserved)); + reserved_index += 1; + } + } + + let into_alloc_frames_fn = frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?; + debug!("Initialized new frame allocator!"); + frame_allocator::dump_frame_allocator_state(); + + page_allocator::init(VirtualAddress::new_canonical(kernel_phys_end.value()))?; + debug!("Initialized new page allocator!"); + page_allocator::dump_page_allocator_state(); + + // Initialize paging, which creates a new page table and maps all of the current code/data sections into it. + paging::init(boot_info, into_alloc_frames_fn) + .inspect(|(new_page_table, ..)| { + debug!("Done with paging::init(). new page table: {:?}", new_page_table); + }) +} + +#[cfg(target_arch = "aarch64")] +/// Initializes the virtual memory management system. +/// +/// A slice describing the current memory layout is required. +/// +/// Returns the kernel's current PageTable, if successful. +pub fn init( + layout: &[(FrameRange, MemoryRegionType, Option)], +) -> Result { + // Identifying free and reserved regions so we can initialize the frame allocator. + let mut free_regions: [Option; 32] = Default::default(); + let mut free_index = 0; + let mut reserved_regions: [Option; 32] = Default::default(); + let mut reserved_index = 0; + + for (range, mem_type, _) in layout { + let (dst, index) = match mem_type { + MemoryRegionType::Free => (&mut free_regions, &mut free_index), + MemoryRegionType::Reserved => (&mut reserved_regions, &mut reserved_index), + MemoryRegionType::Unknown => continue, + }; + + let region = PhysicalMemoryRegion::new(range.clone(), *mem_type); + dst[*index] = Some(region); + *index += 1; + } + + let into_alloc_frames_fn = frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?; + debug!("Initialized new frame allocator!"); + // frame_allocator::dump_frame_allocator_state(); + + // On x86_64 `page_allocator` is initialized with a value obtained + // from the ELF layout. Here I'm choosing a value which is probably + // valid (uneducated guess); once we have an ELF aarch64 kernel + // we'll be able to use the original limit defined with KERNEL_OFFSET + // and the ELF layout. + page_allocator::init(VirtualAddress::new_canonical(0x100_000_000))?; + debug!("Initialized new page allocator!"); + // page_allocator::dump_page_allocator_state(); + + // Initialize paging, which only bootstraps the current page table at the moment. + paging::init(into_alloc_frames_fn, layout) + .inspect(|page_table| debug!("Done with paging::init(). page table: {:?}", page_table)) +} + +/// Finishes initializing the memory management system after the heap is ready. +/// +/// Returns the following tuple: +/// * The kernel's new [`MemoryManagementInfo`], representing the initial virtual address space, +/// * The kernel's list of identity-mapped [`MappedPages`], +/// which must not be dropped until all AP (additional CPUs) are fully booted, +/// but *should* be dropped before starting the first user application. 
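+///
+/// # Example (illustrative sketch)
+/// The variable names below are placeholders for values produced earlier in boot
+/// by the arch-specific `init()` and by the heap-setup code:
+/// ```ignore
+/// let (kernel_mmi_ref, identity_mapped_pages) =
+///     memory::init_post_heap(page_table, higher_half, identity, heap_mp);
+/// // From this point on, `get_kernel_mmi_ref()` returns `Some(...)`.
+/// ```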
+pub fn init_post_heap( + page_table: PageTable, + mut higher_half_mapped_pages: [Option>; 32], + mut identity_mapped_pages: [Option>; 32], + heap_mapped_pages: MappedPages +) -> (MmiRef, NoDrop>) { + // HERE: heap is initialized! We can now use `alloc` types. + + page_allocator::convert_to_heap_allocated(); + frame_allocator::convert_to_heap_allocated(); + + let mut higher_half_mapped_pages: Vec = higher_half_mapped_pages + .iter_mut() + .filter_map(|opt| opt.take().map(NoDrop::into_inner)) + .collect(); + higher_half_mapped_pages.push(heap_mapped_pages); + let identity_mapped_pages: Vec = identity_mapped_pages + .iter_mut() + .filter_map(|opt| opt.take().map(NoDrop::into_inner)) + .collect(); + let identity_mapped_pages = NoDrop::new(identity_mapped_pages); + + // Construct the kernel's memory mgmt info, i.e., its address space info + let kernel_mmi = MemoryManagementInfo { + page_table, + extra_mapped_pages: higher_half_mapped_pages, + }; + + let kernel_mmi_ref = KERNEL_MMI.call_once( || { + Arc::new(MutexIrqSafe::new(kernel_mmi)) + }); + + (kernel_mmi_ref.clone(), identity_mapped_pages) +} diff --git a/aarch64/kernel/memory/src/paging/mapper.rs b/aarch64/kernel/memory/src/paging/mapper.rs new file mode 100644 index 0000000000..0dc19d1afe --- /dev/null +++ b/aarch64/kernel/memory/src/paging/mapper.rs @@ -0,0 +1,1277 @@ +// Copyright 2016 Philipp Oppermann. See the README.md +// file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use core::{ + borrow::{Borrow, BorrowMut}, + cmp::Ordering, + fmt::{self, Write}, + hash::{Hash, Hasher}, + marker::PhantomData, + mem, + ops::{Deref, DerefMut}, + ptr::{NonNull, Unique}, + slice, +}; +use log::{error, warn, debug, trace}; +use crate::{BROADCAST_TLB_SHOOTDOWN_FUNC, VirtualAddress, PhysicalAddress, Page, Frame, FrameRange, AllocatedPages, AllocatedFrames}; +use crate::paging::{ + get_current_p4, + PageRange, + table::{P4, Table, Level4, NextLevelAccess}, +}; +use pte_flags::PteFlagsArch; +use spin::Once; +use kernel_config::memory::PAGE_SIZE; +use super::tlb_flush_virt_addr; +use zerocopy::FromBytes; +use page_table_entry::UnmapResult; +use owned_borrowed_trait::{OwnedOrBorrowed, Owned, Borrowed}; + +#[cfg(target_arch = "x86_64")] +use kernel_config::memory::ENTRIES_PER_PAGE_TABLE; + +/// This is a private callback used to convert `UnmappedFrames` into `AllocatedFrames`. +/// +/// This exists to break the cyclic dependency cycle between `page_table_entry` and +/// `frame_allocator`, which depend on each other as such: +/// * `frame_allocator` needs to `impl Into for UnmappedFrames` +/// in order to allow unmapped exclusive frames to be safely deallocated +/// * `page_table_entry` needs to use the `AllocatedFrames` type in order to allow +/// page table entry values to be set safely to a real physical frame that is owned and exists. +/// +/// To get around that, the `frame_allocator::init()` function returns a callback +/// to its function that allows converting a range of unmapped frames back into `AllocatedFrames`, +/// which then allows them to be dropped and thus deallocated. +/// +/// This is safe because the frame allocator can only be initialized once, and also because +/// only this crate has access to that function callback and can thus guarantee +/// that it is only invoked for `UnmappedFrames`. 
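+///
+/// Roughly, the pattern looks like this (illustrative sketch):
+/// ```ignore
+/// // `frame_allocator::init()` hands back the conversion callback...
+/// let into_alloc_frames_fn = frame_allocator::init(free_regions, reserved_regions)?;
+/// // ...and the paging initialization code stores it here exactly once, so that
+/// // `unmap()` can later turn `UnmappedFrames` back into `AllocatedFrames`.
+/// INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn);
+/// ```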
+pub(super) static INTO_ALLOCATED_FRAMES_FUNC: Once AllocatedFrames> = Once::new(); + + +pub struct Mapper { + p4: Unique>, + /// The Frame contaning the top-level P4 page table. + pub(crate) target_p4: Frame, +} + +impl Mapper { + pub(crate) fn from_current() -> Mapper { + Self::with_p4_frame(get_current_p4()) + } + + pub(crate) fn with_p4_frame(p4: Frame) -> Mapper { + Mapper { + p4: Unique::new(P4).unwrap(), // cannot panic because we know the P4 value is valid + target_p4: p4, + } + } + + unsafe fn p4_ptr(&self, access: NextLevelAccess) -> *mut Table { + match access { + NextLevelAccess::Recursive => self.p4.as_ptr(), + NextLevelAccess::Identity => self.target_p4.start_address().value() as *mut _ + } + } + + pub(crate) fn p4_with_access(&self, access: NextLevelAccess) -> &Table { + unsafe { self.p4_ptr(access).as_ref().unwrap() } + } + + pub(crate) fn p4_mut_with_access(&mut self, access: NextLevelAccess) -> &mut Table { + unsafe { self.p4_ptr(access).as_mut().unwrap() } + } + + pub(crate) fn p4(&self) -> &Table { + self.p4_with_access(NextLevelAccess::Recursive) + } + + pub(crate) fn p4_mut(&mut self) -> &mut Table { + self.p4_mut_with_access(NextLevelAccess::Recursive) + } + + /// Dumps all page table entries at all four page table levels for the given `VirtualAddress`, + /// and also shows their `PteFlags`. + /// + /// The page table details are written to the the given `writer`. + pub fn dump_pte(&self, writer: &mut W, virtual_address: VirtualAddress) -> fmt::Result { + let page = Page::containing_address(virtual_address); + let p4 = self.p4(); + let p3 = p4.next_table(page.p4_index(), NextLevelAccess::Recursive); + let p2 = p3.and_then(|p3| p3.next_table(page.p3_index(), NextLevelAccess::Recursive)); + let p1 = p2.and_then(|p2| p2.next_table(page.p2_index(), NextLevelAccess::Recursive)); + write!( + writer, + "VirtualAddress: {:#X}: + P4 entry: {:#X} ({:?}) + P3 entry: {:#X} ({:?}) + P2 entry: {:#X} ({:?}) + P1 entry: (PTE) {:#X} ({:?})", + virtual_address, + &p4[page.p4_index()].value(), + &p4[page.p4_index()].flags(), + p3.map(|p3| &p3[page.p3_index()]).map(|p3_entry| p3_entry.value()).unwrap_or(0x0), + p3.map(|p3| &p3[page.p3_index()]).map(|p3_entry| p3_entry.flags()), + p2.map(|p2| &p2[page.p2_index()]).map(|p2_entry| p2_entry.value()).unwrap_or(0x0), + p2.map(|p2| &p2[page.p2_index()]).map(|p2_entry| p2_entry.flags()), + p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.value()).unwrap_or(0x0), + p1.map(|p1| &p1[page.p1_index()]).map(|p1_entry| p1_entry.flags()), + ) + } + + /// Translates a `VirtualAddress` to a `PhysicalAddress` by walking the page tables. + pub fn translate(&self, virtual_address: VirtualAddress) -> Option { + // get the frame number of the page containing the given virtual address, + // and then the corresponding physical address is that page frame number * page size + offset + self.translate_page(Page::containing_address(virtual_address)) + .map(|frame| frame.start_address() + virtual_address.page_offset()) + } + + /// Translates a virtual memory `Page` to a physical memory `Frame` by walking the page tables. + pub fn translate_page(&self, page: Page) -> Option { + let p3 = self.p4().next_table(page.p4_index(), NextLevelAccess::Recursive); + + #[cfg(target_arch = "x86_64")] + let huge_page = || { + p3.and_then(|p3| { + let p3_entry = &p3[page.p3_index()]; + // 1GiB page? 
+ if let Some(start_frame) = p3_entry.pointed_frame() { + if p3_entry.flags().is_huge() { + // address must be 1GiB aligned + assert!(start_frame.number() % (ENTRIES_PER_PAGE_TABLE * ENTRIES_PER_PAGE_TABLE) == 0); + return Some(Frame::containing_address(PhysicalAddress::new_canonical( + PAGE_SIZE * (start_frame.number() + page.p2_index() * ENTRIES_PER_PAGE_TABLE + page.p1_index()) + ))); + } + } + if let Some(p2) = p3.next_table(page.p3_index(), NextLevelAccess::Recursive) { + let p2_entry = &p2[page.p2_index()]; + // 2MiB page? + if let Some(start_frame) = p2_entry.pointed_frame() { + if p2_entry.flags().is_huge() { + // address must be 2MiB aligned + assert!(start_frame.number() % ENTRIES_PER_PAGE_TABLE == 0); + return Some(Frame::containing_address(PhysicalAddress::new_canonical( + PAGE_SIZE * (start_frame.number() + page.p1_index()) + ))); + } + } + } + None + }) + }; + #[cfg(target_arch = "aarch64")] + let huge_page = || { + // TODO: huge page (block descriptor) translation for aarch64 + None + }; + + p3.and_then(|p3| p3.next_table(page.p3_index(), NextLevelAccess::Recursive)) + .and_then(|p2| p2.next_table(page.p2_index(), NextLevelAccess::Recursive)) + .and_then(|p1| p1[page.p1_index()].pointed_frame()) + .or_else(huge_page) + } + + + /// An internal function that performs the actual mapping of a range of allocated `pages` + /// to a range of allocated `frames`. + /// + /// Returns a tuple of the new `MappedPages` object containing the allocated `pages` + /// and the allocated `frames` object. + pub(super) fn internal_map_to( + &mut self, + pages: AllocatedPages, + frames: Frames, + flags: Flags, + access: NextLevelAccess, + ) -> Result<(MappedPages, Frames::Inner), &'static str> + where + Frames: OwnedOrBorrowed, + Flags: Into, + { + let frames = frames.into_inner(); + let flags = flags.into(); + let higher_level_flags = flags.adjust_for_higher_level_pte(); + let actual_flags = flags + .valid(true) + .exclusive(Frames::OWNED); + + #[cfg(target_arch = "aarch64")] + let actual_flags = actual_flags.accessed(true).page_descriptor(true); + + let pages_count = pages.size_in_pages(); + let frames_count = frames.borrow().size_in_frames(); + if pages_count != frames_count { + error!("map_allocated_pages_to(): pages {:?} count {} must equal frames {:?} count {}!", + pages, pages_count, frames.borrow(), frames_count + ); + return Err("map_allocated_pages_to(): page count must equal frame count"); + } + + // iterate over pages and frames in lockstep + for (page, frame) in pages.deref().clone().into_iter().zip(frames.borrow().into_iter()) { + let p4 = self.p4_mut_with_access(access); + let p3 = p4.next_table_create(page.p4_index(), higher_level_flags, access)?; + let p2 = p3.next_table_create(page.p3_index(), higher_level_flags, access)?; + let p1 = p2.next_table_create(page.p2_index(), higher_level_flags, access)?; + + if !p1[page.p1_index()].is_unused() { + error!("map_allocated_pages_to(): page {:#X} -> frame {:#X}, page was already in use!", page.start_address(), frame.start_address()); + return Err("map_allocated_pages_to(): page was already in use"); + } + + p1[page.p1_index()].set_entry(frame, actual_flags); + } + + Ok(( + MappedPages { + page_table_p4: self.target_p4.clone(), + pages, + flags: actual_flags, + }, + frames, + )) + } + + + /// Maps the given virtual `AllocatedPages` to the given physical `AllocatedFrames`. + /// + /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`. 
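+    ///
+    /// # Example (illustrative sketch)
+    /// Mapping a hypothetical MMIO region; the physical address and flag names are placeholders:
+    /// ```ignore
+    /// let pages = page_allocator::allocate_pages_by_bytes(0x1000)
+    ///     .ok_or("out of virtual pages")?;
+    /// let frames = frame_allocator::allocate_frames_by_bytes_at(
+    ///     PhysicalAddress::new(0x0900_0000).ok_or("invalid paddr")?,
+    ///     0x1000,
+    /// )?;
+    /// let mmio = mapper.map_allocated_pages_to(
+    ///     pages, frames, PteFlags::new().writable(true).device_memory(true))?;
+    /// ```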
+ pub fn map_allocated_pages_to>( + &mut self, + pages: AllocatedPages, + frames: AllocatedFrames, + flags: F, + ) -> Result { + let (mapped_pages, frames) = self.internal_map_to(pages, Owned(frames), flags, NextLevelAccess::Recursive)?; + + // Currently we forget the actual `AllocatedFrames` object because + // there is no easy/efficient way to store a dynamic list of non-contiguous frames (would require Vec). + // This is okay because we will deallocate each of these frames when this MappedPages object is dropped + // and each of the page table entries for its pages are cleared. + core::mem::forget(frames); + + Ok(mapped_pages) + } + + + #[cfg(any(target_arch = "aarch64", doc))] + /// Maps the given virtual `AllocatedPages` to the given physical `AllocatedFrames`. + /// + /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`. + pub(crate) fn map_allocated_pages_to_frames_identity>( + &mut self, + pages: AllocatedPages, + frames: AllocatedFrames, + flags: F, + ) -> Result { + let (mapped_pages, frames) = self.internal_map_to(pages, Owned(frames), flags, NextLevelAccess::Identity)?; + + // Currently we forget the actual `AllocatedFrames` object because + // there is no easy/efficient way to store a dynamic list of non-contiguous frames (would require Vec). + // This is okay because we will deallocate each of these frames when this MappedPages object is dropped + // and each of the page table entries for its pages are cleared. + core::mem::forget(frames); + + Ok(mapped_pages) + } + + + /// Maps the given `AllocatedPages` to randomly chosen (allocated) physical frames. + /// + /// Consumes the given `AllocatedPages` and returns a `MappedPages` object which contains those `AllocatedPages`. + pub fn map_allocated_pages>( + &mut self, + pages: AllocatedPages, + flags: F, + ) -> Result { + let flags = flags.into(); + let higher_level_flags = flags.adjust_for_higher_level_pte(); + + // Only the lowest-level P1 entry can be considered exclusive, and only because + // we are mapping it exclusively (to owned `AllocatedFrames`). + let actual_flags = flags + .exclusive(true) + .valid(true); + + #[cfg(target_arch = "aarch64")] + let actual_flags = actual_flags.accessed(true).page_descriptor(true); + + for page in pages.deref().clone() { + let af = frame_allocator::allocate_frames(1).ok_or("map_allocated_pages(): couldn't allocate new frame, out of memory")?; + + let p4 = self.p4_mut(); + let p3 = p4.next_table_create(page.p4_index(), higher_level_flags, NextLevelAccess::Recursive)?; + let p2 = p3.next_table_create(page.p3_index(), higher_level_flags, NextLevelAccess::Recursive)?; + let p1 = p2.next_table_create(page.p2_index(), higher_level_flags, NextLevelAccess::Recursive)?; + + if !p1[page.p1_index()].is_unused() { + error!("map_allocated_pages(): page {:#X} -> frame {:#X}, page was already in use!", + page.start_address(), af.start_address() + ); + return Err("map_allocated_pages(): page was already in use"); + } + + p1[page.p1_index()].set_entry(af.as_allocated_frame(), actual_flags); + core::mem::forget(af); // we currently forget frames allocated here since we don't yet have a way to track them. + } + + Ok(MappedPages { + page_table_p4: self.target_p4.clone(), + pages, + flags: actual_flags, + }) + } +} + +// This implementation block contains a hacky function for non-bijective mappings +// that shouldn't be exposed to most other OS components, especially applications. 
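+//
+// An illustrative (hypothetical) use: creating a second view of frames that are
+// already mapped elsewhere, without transferring ownership of those frames:
+//
+//     let second_view = unsafe {
+//         Mapper::map_to_non_exclusive(&mut mapper, extra_pages, &shared_frames, PteFlags::new())?
+//     };
+//
+// Because the resulting pages are marked non-exclusive, dropping `second_view`
+// does not deallocate `shared_frames`.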
+impl Mapper { + /// An unsafe escape hatch that allows one to map the given virtual `AllocatedPages` + /// to the given range of physical `frames`. + /// + /// This is unsafe because it accepts a reference to an `AllocatedFrames` object. + /// This violates Theseus's bijective mapping guarantee, + /// in which only one virtual page can map to a given physical frame, + /// which preserves Rust's knowledge of language-level aliasing and thus its safety checks. + /// + /// As such, the pages mapped here will be marked as non-exclusive, + /// regardless of the `flags` passed in. + /// This means that the `frames` they map will NOT be deallocated upon unmapping. + /// + /// Consumes the given `AllocatedPages` and returns a `MappedPages` object + /// which contains those `AllocatedPages`. + #[doc(hidden)] + pub unsafe fn map_to_non_exclusive>( + mapper: &mut Self, + pages: AllocatedPages, + frames: &AllocatedFrames, + flags: F, + ) -> Result { + // In this function, none of the frames can be mapped as exclusive + // because we're accepting a *reference* to an `AllocatedFrames`, not consuming it. + mapper.internal_map_to(pages, Borrowed(frames), flags, NextLevelAccess::Recursive) + .map(|(mp, _af)| mp) + } +} + + +/// Represents a contiguous range of virtual memory pages that are currently mapped. +/// A `MappedPages` object can only have a single range of contiguous pages, not multiple disjoint ranges. +/// This does not guarantee that its pages are mapped to frames that are contiguous in physical memory. +/// +/// This object also represents ownership of those pages; if this object falls out of scope, +/// it will be dropped, and the pages will be unmapped and then also de-allocated. +/// Thus, it ensures memory safety by guaranteeing that this object must be held +/// in order to access data stored in these mapped pages, much like a guard type. +#[derive(Debug)] +pub struct MappedPages { + /// The Frame containing the top-level P4 page table that this MappedPages was originally mapped into. + page_table_p4: Frame, + /// The range of allocated virtual pages contained by this mapping. + pages: AllocatedPages, + // The PTE flags that define the page permissions of this mapping. + flags: PteFlagsArch, +} +impl Deref for MappedPages { + type Target = PageRange; + fn deref(&self) -> &PageRange { + self.pages.deref() + } +} + +impl MappedPages { + /// Returns an empty MappedPages object that performs no allocation or mapping actions. + /// Can be used as a placeholder, but will not permit any real usage. + pub const fn empty() -> MappedPages { + MappedPages { + page_table_p4: Frame::containing_address(PhysicalAddress::zero()), + pages: AllocatedPages::empty(), + flags: PteFlagsArch::new(), + } + } + + /// Returns the flags that describe this `MappedPages` page table permissions. + pub fn flags(&self) -> PteFlagsArch { + self.flags + } + + /// Merges the given `MappedPages` object `mp` into this `MappedPages` object (`self`). + /// + /// For example, if you have the following `MappedPages` objects: + /// * this mapping, with a page range including one page at 0x2000 + /// * `mp`, with a page range including two pages at 0x3000 and 0x4000 + /// Then this `MappedPages` object will be updated to cover three pages from `[0x2000:0x4000]` inclusive. + /// + /// In addition, the `MappedPages` objects must have the same flags and page table root frame + /// (i.e., they must have all been mapped using the same set of page tables). 
+ /// + /// If an error occurs, such as the `mappings` not being contiguous or having different flags, + /// then a tuple including an error message and the original `mp` will be returned, + /// which prevents the `mp` from being dropped. + /// + /// # Note + /// No remapping actions or page reallocations will occur on either a failure or a success. + pub fn merge(&mut self, mut mp: MappedPages) -> Result<(), (&'static str, MappedPages)> { + if mp.page_table_p4 != self.page_table_p4 { + error!("MappedPages::merge(): mappings weren't mapped using the same page table: {:?} vs. {:?}", + self.page_table_p4, mp.page_table_p4); + return Err(("failed to merge MappedPages that were mapped into different page tables", mp)); + } + if mp.flags != self.flags { + error!("MappedPages::merge(): mappings had different flags: {:?} vs. {:?}", + self.flags, mp.flags); + return Err(("failed to merge MappedPages that were mapped with different flags", mp)); + } + + // Attempt to merge the page ranges together, which will fail if they're not contiguous. + // First, take ownership of the AllocatedPages inside of the `mp` argument. + let second_alloc_pages_owned = core::mem::replace(&mut mp.pages, AllocatedPages::empty()); + if let Err(orig) = self.pages.merge(second_alloc_pages_owned) { + // Upon error, restore the `mp.pages` AllocatedPages that we took ownership of. + mp.pages = orig; + error!("MappedPages::merge(): mappings not virtually contiguous: first ends at {:?}, second starts at {:?}", + self.pages.end(), mp.pages.start() + ); + return Err(("failed to merge MappedPages that weren't virtually contiguous", mp)); + } + + // Ensure the existing mapping doesn't run its drop handler and unmap its pages. + mem::forget(mp); + Ok(()) + } + + /// Splits this `MappedPages` into two separate `MappedPages` objects: + /// * `[beginning : at_page - 1]` + /// * `[at_page : end]` + /// + /// This function follows the behavior of [`core::slice::split_at()`], + /// thus, either one of the returned `MappedPages` objects may be empty. + /// * If `at_page == self.pages.start`, the first returned `MappedPages` object will be empty. + /// * If `at_page == self.pages.end + 1`, the second returned `MappedPages` object will be empty. + /// + /// Returns an `Err` containing this `MappedPages` (`self`) if `at_page` is not within its bounds. + /// + /// # Note + /// No remapping actions or page reallocations will occur on either a failure or a success. + /// + /// [`core::slice::split_at()`]: https://doc.rust-lang.org/core/primitive.slice.html#method.split_at + pub fn split(mut self, at_page: Page) -> Result<(MappedPages, MappedPages), MappedPages> { + // Take ownership of the `AllocatedPages` inside of the `MappedPages` so we can split it. + let alloc_pages_owned = core::mem::replace(&mut self.pages, AllocatedPages::empty()); + + match alloc_pages_owned.split(at_page) { + Ok((first_ap, second_ap)) => Ok(( + MappedPages { + page_table_p4: self.page_table_p4, + pages: first_ap, + flags: self.flags, + }, + MappedPages { + page_table_p4: self.page_table_p4, + pages: second_ap, + flags: self.flags, + } + // When returning here, `self` will be dropped, but it's empty so it has no effect. + )), + Err(orig_ap) => { + // Upon error, restore the `self.pages` (`AllocatedPages`) that we took ownership of. + self.pages = orig_ap; + Err(self) + } + } + } + + + /// Creates a deep copy of this `MappedPages` memory region, + /// by duplicating not only the virtual memory mapping + /// but also the underlying physical memory frames. 
+ /// + /// The caller can optionally specify new flags for the duplicated mapping, + /// otherwise, the same flags as the existing `MappedPages` will be used. + /// This is useful for when you want to modify contents in the new pages, + /// since it avoids extra `remap()` operations. + /// + /// Returns a new `MappedPages` object with the same in-memory contents + /// as this object, but at a completely new memory region. + pub fn deep_copy>( + &self, + active_table_mapper: &mut Mapper, + new_flags: Option, + ) -> Result { + warn!("MappedPages::deep_copy() has not been adequately tested yet."); + let size_in_pages = self.size_in_pages(); + + use crate::paging::allocate_pages; + let new_pages = allocate_pages(size_in_pages).ok_or_else(|| "Couldn't allocate_pages()")?; + + // we must temporarily map the new pages as Writable, since we're about to copy data into them + let new_flags = new_flags.map_or(self.flags, Into::into); + let needs_remapping = !new_flags.is_writable(); + let mut new_mapped_pages = active_table_mapper.map_allocated_pages( + new_pages, + new_flags.writable(true), // force writable + )?; + + // perform the actual copy of in-memory content + // TODO: there is probably a better way to do this, e.g., `rep stosq/movsq` or something + { + type PageContent = [u8; PAGE_SIZE]; + let source: &[PageContent] = self.as_slice(0, size_in_pages)?; + let dest: &mut [PageContent] = new_mapped_pages.as_slice_mut(0, size_in_pages)?; + dest.copy_from_slice(source); + } + + if needs_remapping { + new_mapped_pages.remap(active_table_mapper, new_flags)?; + } + + Ok(new_mapped_pages) + } + + + /// Change the mapping flags of this `MappedPages`'s page table entries. + /// + /// Note that attempting to change certain "reserved" flags will have no effect. + /// For example, the `EXCLUSIVE` flag cannot be changed beause arbitrarily setting it + /// would violate safety. + pub fn remap>( + &mut self, + active_table_mapper: &mut Mapper, + new_flags: F, + ) -> Result<(), &'static str> { + if self.size_in_pages() == 0 { return Ok(()); } + + // Use the existing value of the `EXCLUSIVE` flag, ignoring whatever value was passed in. + // Also ensure these flags are PRESENT (valid), since they are currently being mapped. + let new_flags = new_flags.into() + .exclusive(self.flags.is_exclusive()) + .valid(true); + + if new_flags == self.flags { + trace!("remap(): new_flags were the same as existing flags, doing nothing."); + return Ok(()); + } + + for page in self.pages.clone() { + let p1 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index(), NextLevelAccess::Recursive) + .and_then(|p3| p3.next_table_mut(page.p3_index(), NextLevelAccess::Recursive)) + .and_then(|p2| p2.next_table_mut(page.p2_index(), NextLevelAccess::Recursive)) + .ok_or("mapping code does not support huge pages")?; + + p1[page.p1_index()].set_flags(new_flags); + + tlb_flush_virt_addr(page.start_address()); + } + + if let Some(func) = BROADCAST_TLB_SHOOTDOWN_FUNC.get() { + func(self.pages.deref().clone()); + } + + self.flags = new_flags; + Ok(()) + } + + /// Consumes and unmaps this `MappedPages` object without auto-deallocating its `AllocatedPages` and `AllocatedFrames`, + /// allowing the caller to continue using them directly, e.g., reusing them for a future mapping. + /// This removes the need to attempt to to reallocate those same pages or frames on a separate code path. + /// + /// Note that only the first contiguous range of `AllocatedFrames` will be returned, if any were unmapped. 
+ /// All other non-contiguous ranges will be auto-dropped and deallocated. + /// This is due to how frame deallocation works. + pub fn unmap_into_parts(mut self, active_table_mapper: &mut Mapper) -> Result<(AllocatedPages, Option), Self> { + match self.unmap(active_table_mapper) { + Ok(first_frames) => { + let pages = mem::replace(&mut self.pages, AllocatedPages::empty()); + Ok((pages, first_frames)) + } + Err(e) => { + error!("MappedPages::unmap_into_parts(): failed to unmap {:?}, error: {}", self, e); + return Err(self); + } + } + } + + + /// Remove the virtual memory mapping represented by this `MappedPages`. + /// + /// This must NOT be public because it does not take ownership of this `MappedPages` object (`self`). + /// This is to allow it to be invoked from the `MappedPages` drop handler. + /// + /// Returns the **first, contiguous** range of frames that was mapped to these pages. + /// If there are multiple discontiguous ranges of frames that were unmapped, + /// or the frames were not mapped bijectively (i.e., multiple pages mapped to these frames), + /// then only the first contiguous range of frames will be returned. + /// + /// TODO: a few optional improvements could be made here: + /// (1) Accept an `Option<&mut Vec>` argument that allows the caller to + /// recover **all** `AllocatedFrames` unmapped during this function, not just the first contiguous frame range. + /// (2) Redesign this to take/consume `self` by ownership, and expose it as the only unmap function, + /// avoiding the need for a separate `unmap_into_parts()` function. + /// We could then use `mem::replace(&mut self, MappedPages::empty())` in the drop handler + /// to obtain ownership of `self`, which would allow us to transfer ownership of the dropped `MappedPages` here. + /// + fn unmap(&mut self, active_table_mapper: &mut Mapper) -> Result, &'static str> { + if self.size_in_pages() == 0 { return Ok(None); } + + if active_table_mapper.target_p4 != self.page_table_p4 { + error!("BUG: MappedPages::unmap(): {:?}\n current P4 {:?} must equal original P4 {:?}, \ + cannot unmap MappedPages from a different page table than they were originally mapped to!", + self, get_current_p4(), self.page_table_p4 + ); + return Err( + "BUG: MappedPages::unmap(): current P4 must equal original P4, \ + cannot unmap MappedPages from a different page table than they were originally mapped to!" + ); + } + + let mut first_frame_range: Option = None; // this is what we'll return + let mut current_frame_range: Option = None; + + for page in self.pages.clone() { + let p1 = active_table_mapper.p4_mut() + .next_table_mut(page.p4_index(), NextLevelAccess::Recursive) + .and_then(|p3| p3.next_table_mut(page.p3_index(), NextLevelAccess::Recursive)) + .and_then(|p2| p2.next_table_mut(page.p2_index(), NextLevelAccess::Recursive)) + .ok_or("mapping code does not support huge pages")?; + let pte = &mut p1[page.p1_index()]; + if pte.is_unused() { + return Err("unmap(): page not mapped"); + } + + let unmapped_frames = pte.set_unmapped(); + tlb_flush_virt_addr(page.start_address()); + + // Here, create (or extend) a contiguous ranges of frames here based on the `unmapped_frames` + // freed from the newly-unmapped P1 PTE entry above. 
+ match unmapped_frames { + UnmapResult::Exclusive(newly_unmapped_frames) => { + let newly_unmapped_frames = INTO_ALLOCATED_FRAMES_FUNC.get() + .ok_or("BUG: Mapper::unmap(): the `INTO_ALLOCATED_FRAMES_FUNC` callback was not initialized") + .map(|into_func| into_func(newly_unmapped_frames.deref().clone()))?; + + if let Some(mut curr_frames) = current_frame_range.take() { + match curr_frames.merge(newly_unmapped_frames) { + Ok(()) => { + // Here, the newly unmapped frames were contiguous with the current frame_range, + // and we successfully merged them into a single range of AllocatedFrames. + current_frame_range = Some(curr_frames); + } + Err(newly_unmapped_frames) => { + // Here, the newly unmapped frames were **NOT** contiguous with the current_frame_range, + // so we "finish" the current_frame_range (it's already been "taken") and start a new one + // based on the newly unmapped frames. + current_frame_range = Some(newly_unmapped_frames); + + // If this is the first frame range we've unmapped, don't drop it -- save it as the return value. + if first_frame_range.is_none() { + first_frame_range = Some(curr_frames); + } else { + // If this is NOT the first frame range we've unmapped, then go ahead and drop it now, + // otherwise there will not be any other opportunity for it to be dropped. + // + // TODO: here in the future, we could add it to the optional input list (see this function's doc comments) + // of AllocatedFrames to return, i.e., `Option<&mut Vec>`. + trace!("MappedPages::unmap(): dropping additional non-contiguous frames {:?}", curr_frames); + // curr_frames is dropped here + } + } + } + } else { + // This was the first frames we unmapped, so start a new current_frame_range. + current_frame_range = Some(newly_unmapped_frames); + } + } + UnmapResult::NonExclusive(_frames) => { + // trace!("Note: FYI: page {:X?} -> frames {:X?} was just unmapped but not mapped as EXCLUSIVE.", page, _frames); + } + } + } + + #[cfg(not(bm_map))] + { + if let Some(func) = BROADCAST_TLB_SHOOTDOWN_FUNC.get() { + func(self.pages.deref().clone()); + } + } + + // Ensure that we return at least some frame range, even if we broke out of the above loop early. + Ok(first_frame_range.or(current_frame_range)) + } + + + /// Reinterprets this `MappedPages`'s underlying memory region as a struct of the given type `T`, + /// i.e., overlays a struct on top of this mapped memory region. + /// + /// # Requirements + /// The type `T` must implement the `FromBytes` trait, which is similar to the requirements + /// of a "plain old data" type, in that it cannot contain Rust references (`&` or `&mut`). + /// This makes sense because there is no valid way to reinterpret a region of untyped memory + /// as a Rust reference. + /// In addition, if we did permit that, a Rust reference created from unchecked memory contents + /// could never be valid, safe, or sound, as it could allow random memory access + /// (just like with an arbitrary pointer dereference) that could break isolation. + /// + /// To satisfy this condition, you can use `#[derive(FromBytes)]` on your struct type `T`, + /// which will only compile correctly if the struct can be validly constructed + /// from "untyped" memory, i.e., an array of bytes. + /// + /// # Arguments + /// * `byte_offset`: the offset (in number of bytes) from the beginning of the memory region + /// at which the struct is located (where it should start). 
+    /// This offset must be properly aligned with respect to the alignment requirements
+    /// of type `T`, otherwise an error will be returned.
+    ///
+    /// Returns a reference to the new struct (`&T`) that is formed from the underlying memory region,
+    /// with a lifetime dependent upon the lifetime of this `MappedPages` object.
+    /// This ensures safety by guaranteeing that the returned struct reference
+    /// cannot be used after this `MappedPages` object is dropped and unmapped.
+    pub fn as_type<T: FromBytes>(&self, byte_offset: usize) -> Result<&T, &'static str> {
+        let size = mem::size_of::<T>();
+        if false {
+            debug!("MappedPages::as_type(): requested type {} with size {} at byte_offset {}, MappedPages size {}!",
+                core::any::type_name::<T>(),
+                size, byte_offset, self.size_in_bytes()
+            );
+        }
+
+        if byte_offset % mem::align_of::<T>() != 0 {
+            error!("MappedPages::as_type(): requested type {} with size {}, but the byte_offset {} is unaligned with type alignment {}!",
+                core::any::type_name::<T>(),
+                size, byte_offset, mem::align_of::<T>()
+            );
+            return Err("MappedPages::as_type(): the given byte_offset was unaligned with respect to the alignment of type T");
+        }
+
+        let start_vaddr = self.start_address().value().checked_add(byte_offset)
+            .ok_or("MappedPages::as_type(): overflow: start_address + byte_offset")?;
+        // check that size of type T fits within the size of the mapping
+        let end_bound = byte_offset.checked_add(size)
+            .ok_or("MappedPages::as_type(): overflow: byte_offset + size_of::<T>()")?;
+        if end_bound > self.size_in_bytes() {
+            error!("MappedPages::as_type(): requested type {} with size {} at byte_offset {}, which is too large for MappedPages of size {}!",
+                core::any::type_name::<T>(),
+                size, byte_offset, self.size_in_bytes()
+            );
+            return Err("MappedPages::as_type(): requested type and byte_offset would not fit within the MappedPages bounds");
+        }
+
+        // SAFE: we guarantee the size and lifetime are within that of this MappedPages object
+        let t: &T = unsafe {
+            &*(start_vaddr as *const T)
+        };
+
+        Ok(t)
+    }
+
+
+    /// Same as [`MappedPages::as_type()`], but returns a *mutable* reference to the type `T`.
+    ///
+    /// Thus, it also checks that the underlying mapping is writable.
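+    ///
+    /// # Example (illustrative sketch)
+    /// The struct and the `mp` mapping below are hypothetical, shown only to illustrate the call:
+    /// ```ignore
+    /// #[derive(FromBytes)]
+    /// #[repr(C)]
+    /// struct Header { magic: u32, version: u32 }
+    ///
+    /// // `mp` is assumed to be a writable `MappedPages` at least 8 bytes long.
+    /// let header: &mut Header = mp.as_type_mut(0)?;
+    /// header.version = 2;
+    /// ```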
+ pub fn as_type_mut(&mut self, byte_offset: usize) -> Result<&mut T, &'static str> { + let size = mem::size_of::(); + if false { + debug!("MappedPages::as_type_mut(): requested type {} with size {} at byte_offset {}, MappedPages size {}!", + core::any::type_name::(), + size, byte_offset, self.size_in_bytes() + ); + } + + if byte_offset % mem::align_of::() != 0 { + error!("MappedPages::as_type_mut(): requested type {} with size {}, but the byte_offset {} is unaligned with type alignment {}!", + core::any::type_name::(), + size, byte_offset, mem::align_of::() + ); + } + + // check flags to make sure mutability is allowed (otherwise a page fault would occur on a write) + if !self.flags.is_writable() { + error!("MappedPages::as_type_mut(): requested type {} with size {} at byte_offset {}, but MappedPages weren't writable (flags: {:?})", + core::any::type_name::(), + size, byte_offset, self.flags + ); + return Err("MappedPages::as_type_mut(): MappedPages were not writable"); + } + + let start_vaddr = self.start_address().value().checked_add(byte_offset) + .ok_or("MappedPages::as_type_mut(): overflow: start_address + byte_offset")?; + // check that size of type T fits within the size of the mapping + let end_bound = byte_offset.checked_add(size) + .ok_or("MappedPages::as_type_mut(): overflow: byte_offset + size_of::())")?; + if end_bound > self.size_in_bytes() { + error!("MappedPages::as_type_mut(): requested type {} with size {} at byte_offset {}, which is too large for MappedPages of size {}!", + core::any::type_name::(), + size, byte_offset, self.size_in_bytes() + ); + return Err("MappedPages::as_type_mut(): requested type and byte_offset would not fit within the MappedPages bounds"); + } + + // SAFE: we guarantee the size and lifetime are within that of this MappedPages object + let t: &mut T = unsafe { + &mut *(start_vaddr as *mut T) + }; + + Ok(t) + } + + + /// Reinterprets this `MappedPages`'s underlying memory region as `&[T]`, a `length`-element slice of type `T`. + /// + /// It has similar requirements and behavior as [`MappedPages::as_type()`]. + /// + /// # Arguments + /// * `byte_offset`: the offset (in number of bytes) into the memory region + /// at which the slice should start. + /// This offset must be properly aligned with respect to the alignment requirements + /// of type `T`, otherwise an error will be returned. + /// * `length`: the length of the slice, i.e., the number of elements of type `T` in the slice. + /// Thus, the slice's address bounds will span the range from + /// `byte_offset` (inclusive) to `byte_offset + (size_of::() * length)` (exclusive). + /// + /// Returns a reference to the new slice that is formed from the underlying memory region, + /// with a lifetime dependent upon the lifetime of this `MappedPages` object. + /// This ensures safety by guaranteeing that the returned slice + /// cannot be used after this `MappedPages` object is dropped and unmapped. 
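+    ///
+    /// # Example (illustrative sketch)
+    /// The element type, offset, and length below are hypothetical, chosen only to show the call shape:
+    /// ```ignore
+    /// // Interpret the first 64 bytes of this mapping (`mp`) as 16 read-only `u32` values.
+    /// let words: &[u32] = mp.as_slice(0, 16)?;
+    /// let sum = words.iter().fold(0u32, |acc, w| acc.wrapping_add(*w));
+    /// ```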
+ pub fn as_slice(&self, byte_offset: usize, length: usize) -> Result<&[T], &'static str> { + let size_in_bytes = length.checked_mul(mem::size_of::()) + .ok_or("MappedPages::as_slice(): overflow: length * size_of::()")?; + if false { + debug!("MappedPages::as_slice(): requested slice of type {} with length {} (total size {}) at byte_offset {}, MappedPages size {}!", + core::any::type_name::(), + length, size_in_bytes, byte_offset, self.size_in_bytes() + ); + } + + if size_in_bytes > isize::MAX as usize { + return Err("MappedPages::as_slice(): length * size_of::() must be no larger than isize::MAX"); + } + + if byte_offset % mem::align_of::() != 0 { + error!("MappedPages::as_slice(): requested slice of type {} with length {} (total size {}), but the byte_offset {} is unaligned with type alignment {}!", + core::any::type_name::(), + length, size_in_bytes, byte_offset, mem::align_of::() + ); + } + + let start_vaddr = self.start_address().value().checked_add(byte_offset) + .ok_or("MappedPages::as_slice(): overflow: start_address + byte_offset")?; + // check that size of slice fits within the size of the mapping + let end_bound = byte_offset.checked_add(size_in_bytes) + .ok_or("MappedPages::as_slice_mut(): overflow: byte_offset + (length * size_of::())")?; + if end_bound > self.size_in_bytes() { + error!("MappedPages::as_slice(): requested slice of type {} with length {} (total size {}) at byte_offset {}, which is too large for MappedPages of size {}!", + core::any::type_name::(), + length, size_in_bytes, byte_offset, self.size_in_bytes() + ); + return Err("MappedPages::as_slice(): requested slice length and byte_offset would not fit within the MappedPages bounds"); + } + + // SAFETY: + // ✅ The pointer is properly aligned (checked above) and is non-null. + // ✅ The entire memory range of the slice is contained within this `MappedPages` (bounds checked above). + // ✅ The pointer points to `length` consecutive values of type T. + // ✅ The slice memory cannot be mutated by anyone else because we only return an immutable reference to it. + // ✅ The total size of the slice does not exceed isize::MAX (checked above). + // ✅ The lifetime of the returned slice reference is tied to the lifetime of this `MappedPages`. + let slc: &[T] = unsafe { + slice::from_raw_parts(start_vaddr as *const T, length) + }; + + Ok(slc) + } + + + /// Same as [`MappedPages::as_slice()`], but returns a *mutable* slice. + /// + /// Thus, it checks that the underlying mapping is writable. 
+ pub fn as_slice_mut(&mut self, byte_offset: usize, length: usize) -> Result<&mut [T], &'static str> { + let size_in_bytes = length.checked_mul(mem::size_of::()) + .ok_or("MappedPages::as_slice_mut(): overflow: length * size_of::()")?; + + if false { + debug!("MappedPages::as_slice_mut(): requested slice of type {} with length {} (total size {}) at byte_offset {}, MappedPages size {}!", + core::any::type_name::(), + length, size_in_bytes, byte_offset, self.size_in_bytes() + ); + } + + if size_in_bytes > isize::MAX as usize { + return Err("MappedPages::as_slice_mut(): length * size_of::() must be no larger than isize::MAX"); + } + + if byte_offset % mem::align_of::() != 0 { + error!("MappedPages::as_slice_mut(): requested slice of type {} with length {} (total size {}), but the byte_offset {} is unaligned with type alignment {}!", + core::any::type_name::(), + length, size_in_bytes, byte_offset, mem::align_of::() + ); + } + + // check flags to make sure mutability is allowed (otherwise a page fault would occur on a write) + if !self.flags.is_writable() { + error!("MappedPages::as_slice_mut(): requested mutable slice of type {} with length {} (total size {}) at byte_offset {}, but MappedPages weren't writable (flags: {:?})", + core::any::type_name::(), + length, size_in_bytes, byte_offset, self.flags + ); + return Err("MappedPages::as_slice_mut(): MappedPages were not writable"); + } + + let start_vaddr = self.start_address().value().checked_add(byte_offset) + .ok_or("MappedPages::as_slice_mut(): overflow: start_address + byte_offset")?; + // check that size of slice fits within the size of the mapping + let end_bound = byte_offset.checked_add(size_in_bytes) + .ok_or("MappedPages::as_slice_mut(): overflow: byte_offset + (length * size_of::())")?; + if end_bound > self.size_in_bytes() { + error!("MappedPages::as_slice_mut(): requested mutable slice of type {} with length {} (total size {}) at byte_offset {}, which is too large for MappedPages of size {}!", + core::any::type_name::(), + length, size_in_bytes, byte_offset, self.size_in_bytes() + ); + return Err("MappedPages::as_slice_mut(): requested slice length and byte_offset would not fit within the MappedPages bounds"); + } + + // SAFETY: + // ✅ same as for `MappedPages::as_slice()`, plus: + // ✅ The underlying memory is not accessible through any other pointer, as we require a `&mut self` above. + // ✅ The underlying memory can be mutated because it is mapped as writable (checked above). + let slc: &mut [T] = unsafe { + slice::from_raw_parts_mut(start_vaddr as *mut T, length) + }; + + Ok(slc) + } + + /// A convenience function for [`BorrowedMappedPages::from()`]. + pub fn into_borrowed( + self, + byte_offset: usize, + ) -> Result, (MappedPages, &'static str)> { + BorrowedMappedPages::from(self, byte_offset) + } + + /// A convenience function for [`BorrowedMappedPages::from_mut()`]. + pub fn into_borrowed_mut( + self, + byte_offset: usize, + ) -> Result, (MappedPages, &'static str)> { + BorrowedMappedPages::from_mut(self, byte_offset) + } + + /// A convenience function for [`BorrowedSliceMappedPages::from()`]. + pub fn into_borrowed_slice( + self, + byte_offset: usize, + length: usize, + ) -> Result, (MappedPages, &'static str)> { + BorrowedSliceMappedPages::from(self, byte_offset, length) + } + + /// A convenience function for [`BorrowedSliceMappedPages::from_mut()`]. 
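+    ///
+    /// # Example (illustrative sketch)
+    /// The buffer length and variable names are hypothetical; `mp` is assumed to be a writable
+    /// `MappedPages` of at least 512 bytes, and error handling is simplified:
+    /// ```ignore
+    /// // Convert an owned `MappedPages` into a mutable, borrowed 512-byte slice,
+    /// // then recover the original `MappedPages` when done.
+    /// let mut buf: BorrowedSliceMappedPages<u8, Mutable> = mp
+    ///     .into_borrowed_slice_mut(0, 512)
+    ///     .map_err(|(_mp, err)| err)?;
+    /// buf.fill(0);
+    /// let mp: MappedPages = buf.into_inner();
+    /// ```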
+ pub fn into_borrowed_slice_mut( + self, + byte_offset: usize, + length: usize, + ) -> Result, (MappedPages, &'static str)> { + BorrowedSliceMappedPages::from_mut(self, byte_offset, length) + } +} + +impl Drop for MappedPages { + fn drop(&mut self) { + // if self.size_in_pages() > 0 { + // trace!("MappedPages::drop(): unmapped MappedPages {:?}, flags: {:?}", &*self.pages, self.flags); + // } + + let mut mapper = Mapper::from_current(); + if let Err(e) = self.unmap(&mut mapper) { + error!("MappedPages::drop(): failed to unmap, error: {:?}", e); + } + + // Note that the AllocatedPages will automatically be dropped here too, + // we do not need to call anything to make that happen. + } +} + + +/// A borrowed [`MappedPages`] object that derefs to `&T` and optionally also `&mut T`. +/// +/// By default, the `Mutability` type parameter is `Immutable` for ease of use. +/// +/// When dropped, the borrow ends and the contained `MappedPages` is dropped and unmapped. +/// You can manually end the borrow and reclaim the inner `MappedPages` via [`Self::into_inner()`]. +pub struct BorrowedMappedPages { + ptr: Unique, + mp: MappedPages, + _mut: PhantomData, +} + +impl BorrowedMappedPages { + /// Immutably borrows the given `MappedPages` as an instance of type `&T` + /// located at the given `byte_offset` into the `MappedPages`. + /// + /// See [`MappedPages::as_type()`] for more info. + /// + /// Upon failure, returns an error containing the unmodified `MappedPages` and a string + /// describing the error. + pub fn from( + mp: MappedPages, + byte_offset: usize, + ) -> Result, (MappedPages, &'static str)> { + Ok(Self { + ptr: match mp.as_type::(byte_offset) { + Ok(r) => { + let nn: NonNull = r.into(); + nn.into() + } + Err(e_str) => return Err((mp, e_str)), + }, + mp, + _mut: PhantomData, + }) + } +} + +impl BorrowedMappedPages { + /// Mutably borrows the given `MappedPages` as an instance of type `&mut T` + /// located at the given `byte_offset` into the `MappedPages`. + /// + /// See [`MappedPages::as_type_mut()`] for more info. + /// + /// Upon failure, returns an error containing the unmodified `MappedPages` + /// and a string describing the error. + pub fn from_mut( + mut mp: MappedPages, + byte_offset: usize, + ) -> Result, (MappedPages, &'static str)> { + Ok(Self { + ptr: match mp.as_type_mut::(byte_offset) { + Ok(r) => r.into(), + Err(e_str) => return Err((mp, e_str)), + }, + mp, + _mut: PhantomData, + }) + } +} + +impl BorrowedMappedPages { + /// Consumes this object and returns the inner `MappedPages`. + pub fn into_inner(self) -> MappedPages { + self.mp + } +} + +/// Both [`Mutable`] and [`Immutable`] [`BorrowedMappedPages`] can deref into `&T`. +impl Deref for BorrowedMappedPages { + type Target = T; + fn deref(&self) -> &T { + // SAFETY: + // ✅ The pointer is properly aligned; its alignment has been checked in `MappedPages::as_type()`. + // ✅ The pointer is dereferenceable; it has been bounds checked by `MappedPages::as_type()`. + // ✅ The pointer has been initialized in the constructor `from()`. + // ✅ The lifetime of the returned reference `&T` is tied to the lifetime of the `MappedPages`, + // ensuring that the `MappedPages` object will persist at least as long as the reference. + unsafe { self.ptr.as_ref() } + } +} +/// Only [`Mutable`] [`BorrowedMappedPages`] can deref into `&mut T`. 
+impl DerefMut for BorrowedMappedPages { + fn deref_mut(&mut self) -> &mut T { + // SAFETY: + // ✅ Same as the above `Deref` block, plus: + // ✅ The underlying `MappedPages` is guaranteed to be writable by `MappedPages::as_type_mut()`. + unsafe { self.ptr.as_mut() } + } +} +/// Both [`Mutable`] and [`Immutable`] [`BorrowedMappedPages`] implement `AsRef`. +impl AsRef for BorrowedMappedPages { + fn as_ref(&self) -> &T { self.deref() } +} +/// Only [`Mutable`] [`BorrowedMappedPages`] implement `AsMut`. +impl AsMut for BorrowedMappedPages { + fn as_mut(&mut self) -> &mut T { self.deref_mut() } +} +/// Both [`Mutable`] and [`Immutable`] [`BorrowedMappedPages`] implement `Borrow`. +impl Borrow for BorrowedMappedPages { + fn borrow(&self) -> &T { self.deref() } +} +/// Only [`Mutable`] [`BorrowedMappedPages`] implement `BorrowMut`. +impl BorrowMut for BorrowedMappedPages { + fn borrow_mut(&mut self) -> &mut T { self.deref_mut() } +} + +// Forward the impls of `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash`. +impl PartialEq for BorrowedMappedPages { + fn eq(&self, other: &Self) -> bool { self.deref().eq(other.deref()) } +} +impl Eq for BorrowedMappedPages { } +impl PartialOrd for BorrowedMappedPages { + fn partial_cmp(&self, other: &Self) -> Option { self.deref().partial_cmp(other.deref()) } +} +impl Ord for BorrowedMappedPages { + fn cmp(&self, other: &Self) -> Ordering { self.deref().cmp(other.deref()) } +} +impl Hash for BorrowedMappedPages { + fn hash(&self, state: &mut H) { self.deref().hash(state) } +} + + +/// A borrowed [`MappedPages`] object that derefs to a slice `&[T]` and optionally also `&mut [T]`. +/// +/// For ease of use, the default `Mutability` type parameter is `Immutable`. +/// +/// When dropped, the borrow ends and the contained `MappedPages` is dropped and unmapped. +/// You can manually end the borrow and reclaim the inner `MappedPages` via [`Self::into_inner()`]. +pub struct BorrowedSliceMappedPages { + ptr: Unique<[T]>, + mp: MappedPages, + _mut: PhantomData, +} + +impl BorrowedSliceMappedPages { + /// Immutably borrows the given `MappedPages` as a slice `&[T]` + /// of `length` elements of type `T` starting at the given `byte_offset` into the `MappedPages`. + /// + /// See [`MappedPages::as_slice()`] for more info. + /// + /// Upon failure, returns an error containing the unmodified `MappedPages` and a string + /// describing the error. + pub fn from( + mp: MappedPages, + byte_offset: usize, + length: usize, + ) -> Result, (MappedPages, &'static str)> { + Ok(Self { + ptr: match mp.as_slice::(byte_offset, length) { + Ok(r) => { + let nn: NonNull<[T]> = r.into(); + nn.into() + } + Err(e_str) => return Err((mp, e_str)), + }, + mp, + _mut: PhantomData, + }) + } +} + +impl BorrowedSliceMappedPages { + /// Mutably borrows the given `MappedPages` as an instance of type `&mut T` + /// starting at the given `byte_offset` into the `MappedPages`. + /// + /// See [`MappedPages::as_type_mut()`] for more info. + /// + /// Upon failure, returns an error containing the unmodified `MappedPages` + /// and a string describing the error. + pub fn from_mut( + mut mp: MappedPages, + byte_offset: usize, + length: usize, + ) -> Result { + Ok(Self { + ptr: match mp.as_slice_mut::(byte_offset, length) { + Ok(r) => r.into(), + Err(e_str) => return Err((mp, e_str)), + }, + mp, + _mut: PhantomData, + }) + } +} + +impl BorrowedSliceMappedPages { + /// Consumes this object and returns the inner `MappedPages`. 
+ pub fn into_inner(self) -> MappedPages { + self.mp + } +} + +/// Both [`Mutable`] and [`Immutable`] [`BorrowedSliceMappedPages`] can deref into `&[T]`. +impl Deref for BorrowedSliceMappedPages { + type Target = [T]; + fn deref(&self) -> &[T] { + // SAFETY: + // ✅ The pointer is properly aligned; its alignment has been checked in `MappedPages::as_slice()`. + // ✅ The pointer is dereferenceable; it has been bounds checked by `MappedPages::as_slice()`. + // ✅ The pointer has been initialized in the constructor `from()`. + // ✅ The lifetime of the returned reference `&[T]` is tied to the lifetime of the `MappedPages`, + // ensuring that the `MappedPages` object will persist at least as long as the reference. + unsafe { self.ptr.as_ref() } + } +} +/// Only [`Mutable`] [`BorrowedSliceMappedPages`] can deref into `&mut T`. +impl DerefMut for BorrowedSliceMappedPages { + fn deref_mut(&mut self) -> &mut [T] { + // SAFETY: + // ✅ Same as the above `Deref` block, plus: + // ✅ The underlying `MappedPages` is guaranteed to be writable by `MappedPages::as_slice_mut()`. + unsafe { self.ptr.as_mut() } + } +} + +/// Both [`Mutable`] and [`Immutable`] [`BorrowedSliceMappedPages`] implement `AsRef<[T]>`. +impl AsRef<[T]> for BorrowedSliceMappedPages { + fn as_ref(&self) -> &[T] { self.deref() } +} +/// Only [`Mutable`] [`BorrowedSliceMappedPages`] implement `AsMut`. +impl AsMut<[T]> for BorrowedSliceMappedPages { + fn as_mut(&mut self) -> &mut [T] { self.deref_mut() } +} +/// Both [`Mutable`] and [`Immutable`] [`BorrowedSliceMappedPages`] implement `Borrow`. +impl Borrow<[T]> for BorrowedSliceMappedPages { + fn borrow(&self) -> &[T] { self.deref() } +} +/// Only [`Mutable`] [`BorrowedSliceMappedPages`] implement `BorrowMut`. +impl BorrowMut<[T]> for BorrowedSliceMappedPages { + fn borrow_mut(&mut self) -> &mut [T] { self.deref_mut() } +} + +// Forward the impls of `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash`. +impl PartialEq for BorrowedSliceMappedPages { + fn eq(&self, other: &Self) -> bool { self.deref().eq(other.deref()) } +} +impl Eq for BorrowedSliceMappedPages { } +impl PartialOrd for BorrowedSliceMappedPages { + fn partial_cmp(&self, other: &Self) -> Option { self.deref().partial_cmp(other.deref()) } +} +impl Ord for BorrowedSliceMappedPages { + fn cmp(&self, other: &Self) -> Ordering { self.deref().cmp(other.deref()) } +} +impl Hash for BorrowedSliceMappedPages { + fn hash(&self, state: &mut H) { self.deref().hash(state) } +} + + +/// A marker type used to indicate that a [`BorrowedMappedPages`] +/// or [`BorrowedSliceMappedPages`] is borrowed mutably. +/// +/// Implements the [`Mutability`] trait. +#[non_exhaustive] +pub struct Mutable { } + +/// A marker type used to indicate that a [`BorrowedMappedPages`] +/// or [`BorrowedSliceMappedPages`] is borrowed immutably. +/// +/// Implements the [`Mutability`] trait. +#[non_exhaustive] +pub struct Immutable { } + +/// A trait for parameterizing a [`BorrowedMappedPages`] +/// or [`BorrowedSliceMappedPages`] as mutably or immutably borrowed. +/// +/// Only [`Mutable`] and [`Immutable`] are able to implement this trait. 
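+///
+/// For example (an illustrative sketch; the item types below are hypothetical), a struct can
+/// spell out the desired mutability of each borrowed mapping via this type parameter:
+/// ```ignore
+/// struct ExampleDriver {
+///     regs:    BorrowedMappedPages<RegisterBlock, Mutable>,
+///     rx_ring: BorrowedSliceMappedPages<Descriptor, Mutable>,
+///     rom:     BorrowedMappedPages<RomHeader>, // `Immutable` by default
+/// }
+/// ```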
+pub trait Mutability: private::Sealed { } + +impl private::Sealed for Immutable { } +impl private::Sealed for Mutable { } +impl Mutability for Immutable { } +impl Mutability for Mutable { } + +mod private { + pub trait Sealed { } +} diff --git a/aarch64/kernel/memory/src/paging/mod.rs b/aarch64/kernel/memory/src/paging/mod.rs new file mode 100644 index 0000000000..0ec84a56d5 --- /dev/null +++ b/aarch64/kernel/memory/src/paging/mod.rs @@ -0,0 +1,487 @@ +// Copyright 2016 Philipp Oppermann. See the README.md +// file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +mod temporary_page; +mod mapper; +mod table; + +pub use page_table_entry::PageTableEntry; +pub use self::{ + temporary_page::TemporaryPage, + mapper::{ + Mapper, MappedPages, BorrowedMappedPages, BorrowedSliceMappedPages, + Mutability, Mutable, Immutable, + }, +}; + +use core::{ + ops::{Deref, DerefMut}, + fmt, +}; +use log::debug; +use super::{Frame, FrameRange, PageRange, VirtualAddress, PhysicalAddress, + AllocatedPages, allocate_pages, AllocatedFrames, PteFlags, + tlb_flush_all, tlb_flush_virt_addr, get_p4, set_as_active_page_table_root}; +use pte_flags::PteFlagsArch; +use no_drop::NoDrop; +use kernel_config::memory::{RECURSIVE_P4_INDEX}; + +#[cfg(target_arch = "x86_64")] +use super::{find_section_memory_bounds, get_vga_mem_addr, KERNEL_OFFSET}; + +#[cfg(target_arch = "aarch64")] +use { + super::{ + disable_mmu, enable_mmu, allocate_frames_at, allocate_frames, + allocate_pages_at, configure_translation_registers, MemoryRegionType + }, + log::warn, + table::{Table, Level4}, +}; + +/// A top-level root (P4) page table. +/// +/// Auto-derefs into a `Mapper` for easy invocation of memory mapping functions. +pub struct PageTable { + mapper: Mapper, + p4_table: AllocatedFrames, +} +impl fmt::Debug for PageTable { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "PageTable(p4: {:#X})", self.p4_table.start_address()) + } +} + +impl Deref for PageTable { + type Target = Mapper; + + fn deref(&self) -> &Mapper { + &self.mapper + } +} + +impl DerefMut for PageTable { + fn deref_mut(&mut self) -> &mut Mapper { + &mut self.mapper + } +} + +impl PageTable { + /// An internal function to bootstrap a new top-level PageTable + /// based on the given currently-active P4 frame (the frame holding the page table root). + /// + /// Returns an error if the given `active_p4_frame` is not the currently active page table. + fn from_current(active_p4_frame: AllocatedFrames) -> Result { + assert!(active_p4_frame.size_in_frames() == 1); + let current_p4 = get_current_p4(); + if active_p4_frame.start() != ¤t_p4 { + return Err("PageTable::from_current(): the active_p4_frame must be the root of the currently-active page table."); + } + Ok(PageTable { + mapper: Mapper::with_p4_frame(current_p4), + p4_table: active_p4_frame, + }) + } + + #[cfg(target_arch = "x86_64")] + /// Initializes a new top-level P4 `PageTable` whose root is located in the given `new_p4_frame`. + /// It requires using the given `current_active_table` to set up its initial mapping contents. + /// + /// A single allocated page can optionally be provided for use as part of a new `TemporaryPage` + /// for the recursive mapping. + /// + /// Returns the new `PageTable` that exists in physical memory at the given `new_p4_frame`. 
+ /// Note that this new page table has no current mappings beyond the recursive P4 mapping, + /// so you will need to create or copy over any relevant mappings + /// before using (switching) to this new page table in order to ensure the system keeps running. + pub fn new_table( + current_page_table: &mut PageTable, + new_p4_frame: AllocatedFrames, + page: Option, + ) -> Result { + let p4_frame = *new_p4_frame.start(); + + let mut temporary_page = TemporaryPage::create_and_map_table_frame(page, new_p4_frame, current_page_table)?; + temporary_page.with_table_and_frame(|table, frame| { + table.zero(); + table[RECURSIVE_P4_INDEX].set_entry( + frame.as_allocated_frame(), + PteFlagsArch::new().valid(true).writable(true), + ); + })?; + + let (_temp_page, inited_new_p4_frame) = temporary_page.unmap_into_parts(current_page_table)?; + + Ok(PageTable { + mapper: Mapper::with_p4_frame(p4_frame), + p4_table: inited_new_p4_frame.ok_or("BUG: PageTable::new_table(): failed to take back unmapped Frame for p4_table")?, + }) + } + + #[cfg(target_arch = "aarch64")] + /// Initializes a new top-level P4 `PageTable` whose root is located in the given `new_p4_frame`. + /// + /// Returns the new `PageTable` that exists in physical memory at the given `new_p4_frame`. + /// Note that this new page table has no current mappings beyond the recursive P4 mapping, + /// so you will need to create or copy over any relevant mappings + /// before using (switching) to this new page table in order to ensure the system keeps running. + pub fn new_table(new_p4_frame: AllocatedFrames) -> Result { + let p4_frame = new_p4_frame.start_address().value(); + // assumes that we are in identity-mapping + let table = unsafe { (p4_frame as *mut Table).as_mut().unwrap() }; + table.zero(); + + let rec_flags = PteFlagsArch::VALID + | PteFlagsArch::ACCESSED + | PteFlagsArch::PAGE_DESCRIPTOR; + table[RECURSIVE_P4_INDEX].set_entry(new_p4_frame.as_allocated_frame(), rec_flags); + + Ok(PageTable { + mapper: Mapper::with_p4_frame(*new_p4_frame.as_allocated_frame()), + p4_table: new_p4_frame, + }) + } + + /// Temporarily maps the given other `PageTable` to the recursive entry (510th entry) + /// so that the given closure `f` can set up new mappings on the new `other_table` without actually switching to it yet. + /// Accepts a closure `f` that is passed a `Mapper`, such that it can set up new mappings on the other table. + /// Consumes the given `temporary_page` and automatically unmaps it afterwards. + /// # Note + /// This does not perform any task switching or changing of the current page table register (e.g., cr3). + pub fn with( + &mut self, + other_table: &mut PageTable, + f: F, + ) -> Result<(), &'static str> + where F: FnOnce(&mut Mapper) -> Result<(), &'static str> + { + let active_p4_frame = get_current_p4(); + if self.p4_table.start() != &active_p4_frame || self.p4_table.end() != &active_p4_frame { + return Err("PageTable::with(): this PageTable ('self') must be the currently active page table."); + } + + // Temporarily take ownership of this page table's p4 allocated frame and + // create a new temporary page that maps to that frame. 
+ let this_p4 = core::mem::replace(&mut self.p4_table, AllocatedFrames::empty()); + let mut temporary_page = TemporaryPage::create_and_map_table_frame(None, this_p4, self)?; + + // overwrite recursive mapping + self.p4_mut()[RECURSIVE_P4_INDEX].set_entry( + other_table.p4_table.as_allocated_frame(), + PteFlagsArch::new().valid(true).writable(true), + ); + tlb_flush_all(); + + // set mapper's target frame to reflect that future mappings will be mapped into the other_table + self.mapper.target_p4 = *other_table.p4_table.start(); + + // execute `f` in the new context, in which the new page table is considered "active" + let ret = f(self); + + // restore mapper's target frame to reflect that future mappings will be mapped using the currently-active (original) PageTable + self.mapper.target_p4 = active_p4_frame; + + // restore recursive mapping to original p4 table + temporary_page.with_table_and_frame(|p4_table, frame| { + p4_table[RECURSIVE_P4_INDEX].set_entry( + frame.as_allocated_frame(), + PteFlagsArch::new().valid(true).writable(true), + ); + })?; + tlb_flush_all(); + + // Here, recover the current page table's p4 frame and restore it into this current page table, + // since we removed it earlier at the top of this function and gave it to the temporary page. + let (_temp_page, p4_frame) = temporary_page.unmap_into_parts(self)?; + self.p4_table = p4_frame.ok_or("BUG: PageTable::with(): failed to take back unmapped Frame for p4_table")?; + + ret + } + + + /// Switches from the currently-active page table (this `PageTable`, i.e., `self`) to the given `new_table`. + /// After this function, the given `new_table` will be the currently-active `PageTable`. + pub fn switch(&mut self, new_table: &PageTable) { + // debug!("PageTable::switch() old table: {:?}, new table: {:?}", self, new_table); + + set_as_active_page_table_root(new_table.physical_address()); + } + + + /// Returns the physical address of this page table's top-level p4 frame + pub fn physical_address(&self) -> PhysicalAddress { + self.p4_table.start_address() + } +} + + +/// Returns the current top-level (P4) root page table frame. +pub fn get_current_p4() -> Frame { + Frame::containing_address(get_p4()) +} + +#[cfg(target_arch = "x86_64")] +/// Initializes a new page table and sets up all necessary mappings for the kernel to continue running. +/// Returns the following tuple, if successful: +/// +/// 1. The kernel's new PageTable, which is now currently active, +/// 2. the kernel's text section MappedPages, +/// 3. the kernel's rodata section MappedPages, +/// 4. the kernel's data section MappedPages, +/// 5. a tuple of the stack's underlying guard page (an `AllocatedPages` instance) and the actual `MappedPages` backing it, +/// 6. the `MappedPages` holding the bootloader info, +/// 7. the kernel's list of *other* higher-half MappedPages that needs to be converted to a vector after heap initialization, and which should be kept forever, +/// 8. the kernel's list of identity-mapped MappedPages that needs to be converted to a vector after heap initialization, and which should be dropped before starting the first userspace program. +/// +/// Otherwise, it returns a str error message. 
+pub fn init( + boot_info: &multiboot2::BootInformation, + into_alloc_frames_fn: fn(FrameRange) -> AllocatedFrames, +) -> Result<( + PageTable, + NoDrop, + NoDrop, + NoDrop, + (AllocatedPages, NoDrop), + MappedPages, + [Option>; 32], + [Option>; 32], + ), &'static str> +{ + // Store the callback from `frame_allocator::init()` that allows the `Mapper` to convert + // `page_table_entry::UnmappedFrames` back into `AllocatedFrames`. + mapper::INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn); + + let (aggregated_section_memory_bounds, _sections_memory_bounds) = find_section_memory_bounds(boot_info)?; + debug!("{:X?}\n{:X?}", aggregated_section_memory_bounds, _sections_memory_bounds); + + // bootstrap a PageTable from the currently-loaded page table + let current_active_p4 = frame_allocator::allocate_frames_at(aggregated_section_memory_bounds.page_table.start.1, 1)?; + let mut page_table = PageTable::from_current(current_active_p4)?; + debug!("Bootstrapped initial {:?}", page_table); + + let boot_info_start_vaddr = VirtualAddress::new(boot_info.start_address()).ok_or("boot_info start virtual address was invalid")?; + let boot_info_start_paddr = page_table.translate(boot_info_start_vaddr).ok_or("Couldn't get boot_info start physical address")?; + let boot_info_size = boot_info.total_size(); + debug!("multiboot vaddr: {:#X}, multiboot paddr: {:#X}, size: {:#X}\n", boot_info_start_vaddr, boot_info_start_paddr, boot_info_size); + + let new_p4_frame = frame_allocator::allocate_frames(1).ok_or("couldn't allocate frame for new page table")?; + let mut new_table = PageTable::new_table(&mut page_table, new_p4_frame, None)?; + + let mut text_mapped_pages: Option> = None; + let mut rodata_mapped_pages: Option> = None; + let mut data_mapped_pages: Option> = None; + let mut stack_page_group: Option<(AllocatedPages, NoDrop)> = None; + let mut boot_info_mapped_pages: Option = None; + let mut higher_half_mapped_pages: [Option>; 32] = Default::default(); + let mut identity_mapped_pages: [Option>; 32] = Default::default(); + + // Create and initialize a new page table with the same contents as the currently-executing kernel code/data sections. + page_table.with(&mut new_table, |mapper| { + + // Map every section found in the kernel image (given by the boot information above) into our new page table. + // To allow the APs to boot up, we must identity map those kernel sections too, i.e., + // map the same physical frames to both lower-half and higher-half virtual addresses. + // This is the only time in Theseus that we permit non-bijective (non 1-to-1) virtual-to-physical memory mappings, + // since it is unavoidable if we want to place the kernel in the higher half. + // Debatably, this is no longer needed because we're don't have a userspace, and there's no real reason to + // place the kernel in the higher half. + // + // These identity mappings are short-lived; they are unmapped later after all other CPUs are brought up + // but before we start running applications. 
+ + debug!("{:X?}", aggregated_section_memory_bounds); + let mut index = 0; + + let (text_start_virt, text_start_phys) = aggregated_section_memory_bounds.text.start; + let (text_end_virt, text_end_phys) = aggregated_section_memory_bounds.text.end; + let (rodata_start_virt, rodata_start_phys) = aggregated_section_memory_bounds.rodata.start; + let (rodata_end_virt, rodata_end_phys) = aggregated_section_memory_bounds.rodata.end; + let (data_start_virt, data_start_phys) = aggregated_section_memory_bounds.data.start; + let (data_end_virt, data_end_phys) = aggregated_section_memory_bounds.data.end; + let (stack_start_virt, stack_start_phys) = aggregated_section_memory_bounds.stack.start; + let (stack_end_virt, _stack_end_phys) = aggregated_section_memory_bounds.stack.end; + + let text_flags = aggregated_section_memory_bounds.text.flags; + let rodata_flags = aggregated_section_memory_bounds.rodata.flags; + let data_flags = aggregated_section_memory_bounds.data.flags; + + let text_pages = page_allocator::allocate_pages_by_bytes_at(text_start_virt, text_end_virt.value() - text_start_virt.value())?; + let text_frames = frame_allocator::allocate_frames_by_bytes_at(text_start_phys, text_end_phys.value() - text_start_phys.value())?; + let text_pages_identity = page_allocator::allocate_pages_by_bytes_at(text_start_virt - KERNEL_OFFSET, text_end_virt.value() - text_start_virt.value())?; + identity_mapped_pages[index] = Some(NoDrop::new( unsafe { + Mapper::map_to_non_exclusive(mapper, text_pages_identity, &text_frames, text_flags)? + })); + text_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(text_pages, text_frames, text_flags)?)); + index += 1; + + let rodata_pages = page_allocator::allocate_pages_by_bytes_at(rodata_start_virt, rodata_end_virt.value() - rodata_start_virt.value())?; + let rodata_frames = frame_allocator::allocate_frames_by_bytes_at(rodata_start_phys, rodata_end_phys.value() - rodata_start_phys.value())?; + let rodata_pages_identity = page_allocator::allocate_pages_by_bytes_at(rodata_start_virt - KERNEL_OFFSET, rodata_end_virt.value() - rodata_start_virt.value())?; + identity_mapped_pages[index] = Some(NoDrop::new( unsafe { + Mapper::map_to_non_exclusive(mapper, rodata_pages_identity, &rodata_frames, rodata_flags)? + })); + rodata_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(rodata_pages, rodata_frames, rodata_flags)?)); + index += 1; + + let data_pages = page_allocator::allocate_pages_by_bytes_at(data_start_virt, data_end_virt.value() - data_start_virt.value())?; + let data_frames = frame_allocator::allocate_frames_by_bytes_at(data_start_phys, data_end_phys.value() - data_start_phys.value())?; + let data_pages_identity = page_allocator::allocate_pages_by_bytes_at(data_start_virt - KERNEL_OFFSET, data_end_virt.value() - data_start_virt.value())?; + identity_mapped_pages[index] = Some(NoDrop::new( unsafe { + Mapper::map_to_non_exclusive(mapper, data_pages_identity, &data_frames, data_flags)? + })); + data_mapped_pages = Some(NoDrop::new(mapper.map_allocated_pages_to(data_pages, data_frames, data_flags)?)); + index += 1; + + // We don't need to do any mapping for the initial root (P4) page table stack (a separate data section), + // which was initially set up and created by the bootstrap assembly code. + // It was used to bootstrap the initial page table at the beginning of this function. + + // Handle the stack (a separate data section), which consists of one guard page followed by the real stack pages. 
+ // It does not need to be identity mapped because each AP core will have its own stack. + let stack_pages = page_allocator::allocate_pages_by_bytes_at(stack_start_virt, (stack_end_virt - stack_start_virt).value())?; + let start_of_stack_pages = *stack_pages.start() + 1; + let (stack_guard_page, stack_allocated_pages) = stack_pages.split(start_of_stack_pages) + .map_err(|_ap| "BUG: initial stack's allocated pages were not split correctly after guard page")?; + let stack_start_frame = Frame::containing_address(stack_start_phys) + 1; // skip 1st frame, which corresponds to the guard page + let stack_allocated_frames = frame_allocator::allocate_frames_at(stack_start_frame.start_address(), stack_allocated_pages.size_in_pages())?; + let stack_mapped_pages = mapper.map_allocated_pages_to( + stack_allocated_pages, + stack_allocated_frames, + data_flags, + )?; + stack_page_group = Some((stack_guard_page, NoDrop::new(stack_mapped_pages))); + + // Map the VGA display memory as writable. + // We do an identity mapping for the VGA display too, because the AP cores may access it while booting. + let (vga_phys_addr, vga_size_in_bytes, vga_flags) = get_vga_mem_addr()?; + let vga_virt_addr_identity = VirtualAddress::new_canonical(vga_phys_addr.value()); + let vga_display_pages = page_allocator::allocate_pages_by_bytes_at(vga_virt_addr_identity + KERNEL_OFFSET, vga_size_in_bytes)?; + let vga_display_frames = frame_allocator::allocate_frames_by_bytes_at(vga_phys_addr, vga_size_in_bytes)?; + let vga_display_pages_identity = page_allocator::allocate_pages_by_bytes_at(vga_virt_addr_identity, vga_size_in_bytes)?; + identity_mapped_pages[index] = Some(NoDrop::new( unsafe { + Mapper::map_to_non_exclusive(mapper, vga_display_pages_identity, &vga_display_frames, vga_flags)? + })); + higher_half_mapped_pages[index] = Some(NoDrop::new(mapper.map_allocated_pages_to(vga_display_pages, vga_display_frames, vga_flags)?)); + index += 1; + + + // Map the multiboot boot_info at the same address it is currently at, so we can continue to validly access `boot_info` + let boot_info_pages = page_allocator::allocate_pages_by_bytes_at(boot_info_start_vaddr, boot_info_size)?; + let boot_info_frames = frame_allocator::allocate_frames_by_bytes_at(boot_info_start_paddr, boot_info_size)?; + boot_info_mapped_pages = Some(mapper.map_allocated_pages_to( + boot_info_pages, + boot_info_frames, + PteFlags::new(), + )?); + + debug!("identity_mapped_pages: {:?}", &identity_mapped_pages[..index]); + debug!("higher_half_mapped_pages: {:?}", &higher_half_mapped_pages[..index]); + + Ok(()) // mapping closure completed successfully + + })?; // TemporaryPage is dropped here + + + let text_mapped_pages = text_mapped_pages .ok_or("Couldn't map .text section")?; + let rodata_mapped_pages = rodata_mapped_pages .ok_or("Couldn't map .rodata section")?; + let data_mapped_pages = data_mapped_pages .ok_or("Couldn't map .data section")?; + let boot_info_mapped_pages = boot_info_mapped_pages.ok_or("Couldn't map boot_info pages section")?; + let stack_page_group = stack_page_group .ok_or("Couldn't map .stack section")?; + + debug!("switching from old page table {:?} to new page table {:?}", page_table, new_table); + page_table.switch(&new_table); + debug!("switched to new page table {:?}.", new_table); + // The old page_table set up during bootstrap will be dropped here. It's no longer being used. + + // Return the new page table because that's the one that should be used by the kernel in future mappings. 
+ Ok(( + new_table, + text_mapped_pages, + rodata_mapped_pages, + data_mapped_pages, + stack_page_group, + boot_info_mapped_pages, + higher_half_mapped_pages, + identity_mapped_pages + )) +} + +#[cfg(target_arch = "aarch64")] +/// Initializes a new page table and sets up all necessary mappings for the kernel to continue running. +/// Returns the kernel's current PageTable, if successful. +/// Otherwise, it returns a str error message. +pub fn init( + into_alloc_frames_fn: fn(FrameRange) -> AllocatedFrames, + layout: &[(FrameRange, MemoryRegionType, Option)], +) -> Result { + // Store the callback from `frame_allocator::init()` that allows the `Mapper` to convert + // `page_table_entry::UnmappedFrames` back into `AllocatedFrames`. + mapper::INTO_ALLOCATED_FRAMES_FUNC.call_once(|| into_alloc_frames_fn); + + // Modifying the established page table could lead to + // unwanted faults because we don't know if it uses + // 4 levels and it could contain block mappings, which + // we don't support. UEFI makes our code run in an + // identity-mapped AS anyway, so by disabling the MMU + // we don't have to map frames temporarily while building + // the new theseus-made page table. + disable_mmu(); + + // bootstrap a PageTable from the currently-loaded page table + let current_p4 = get_current_p4().start_address(); + let current_active_p4 = allocate_frames_at(current_p4, 1)?; + let mut page_table = PageTable::from_current(current_active_p4)?; + debug!("Bootstrapped initial {:?}", page_table); + + let new_p4_frame = allocate_frames(1).ok_or("couldn't allocate frame for new page table")?; + debug!("new_p4_frame {:?}", new_p4_frame); + + let mut new_table = PageTable::new_table(new_p4_frame)?; + debug!("Created new table {:?}", new_table); + + let mut map_region = |phys_addr: PhysicalAddress, num_frames, flags| -> Result<(), &'static str> { + let virt_addr = VirtualAddress::new(phys_addr.value()) + .ok_or("VirtualAddress::new failed - paging/mod.rs")?; + + let frames = allocate_frames_at(phys_addr, num_frames)?; + let pages = allocate_pages_at(virt_addr, num_frames)?; + + NoDrop::new(new_table.map_allocated_pages_to_frames_identity(pages, frames, flags)?); + + Ok(()) + }; + + // As a UEFI app we are in an identity mapped AS so virt_addr = phys_addr + // either that or the MMU is disabled, which works the same + for (range, _mem_type, flags) in layout { + if let Some(flags) = flags { + let phys_addr = range.start_address(); + let num_frames = range.size_in_frames(); + if let Err(error_msg) = map_region(phys_addr, num_frames, *flags) { + warn!("Early remapping: {}; addr={:?} n={} flags={:?}", + error_msg, phys_addr, num_frames, flags); + } + } + } + + debug!("Switching to the new page table"); + page_table.switch(&new_table); + + debug!("Configuring translation registers"); + configure_translation_registers(); + + debug!("Re-enabling the MMU"); + enable_mmu(); + + debug!("Flushing the TLB"); + tlb_flush_all(); + + Ok(new_table) +} diff --git a/aarch64/kernel/memory/src/paging/table.rs b/aarch64/kernel/memory/src/paging/table.rs new file mode 100644 index 0000000000..c8530f7d5c --- /dev/null +++ b/aarch64/kernel/memory/src/paging/table.rs @@ -0,0 +1,177 @@ +// Copyright 2016 Philipp Oppermann. See the README.md +// file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use kernel_config::memory::{PAGE_SHIFT, ENTRIES_PER_PAGE_TABLE}; +use super::{PageTableEntry, VirtualAddress}; +use core::ops::{Index, IndexMut}; +use core::marker::PhantomData; +use pte_flags::PteFlagsArch; +use zerocopy::FromBytes; + + +/// Theseus uses the 511th entry of the P4 table for mapping the higher-half kernel, +/// so it uses the 510th entry of P4 for the recursive mapping. +/// +/// NOTE: this must be kept in sync with the recursive index in `kernel_config/memory.rs` +/// and `nano_core//boot.asm`. +/// +/// See these links for more: +/// * +/// * +pub const P4: *mut Table = VirtualAddress::new_canonical( + 0o177777_776_776_776_776_0000 + // ^p4 ^p3 ^p2 ^p1 ^offset + // ^ 0o776 means that we're always looking at the 510th entry recursively +).value() as *mut _; + +#[derive(FromBytes)] +pub struct Table { + entries: [PageTableEntry; ENTRIES_PER_PAGE_TABLE], + level: PhantomData, +} + +impl Table { + /// Zero out (clear) all entries in this page table frame. + pub(crate) fn zero(&mut self) { + for entry in self.entries.iter_mut() { + entry.zero(); + } + } +} + +/// Selects a way to get a mutable reference +/// to a next-level table from an earlier table +/// level +#[derive(Copy, Clone, Debug)] +pub(crate) enum NextLevelAccess { + /// Use `Recursive` when the modified table + /// is currently active in the CPU and the MMU + /// is enabled. To access the next page table, + /// code will use the special recursive entry + /// in the P4 table. + Recursive, + #[allow(unused)] + /// If an identity mapping is active, or if + /// paging is fully disabled, then the physical + /// address in a page table entry is assumed + /// to be usable as a virtual address, and code + /// will use that address to access the next-level + /// page table. + Identity, +} + +impl Table { + /// Uses the given `index` as an index into this table's list of entries. + /// + /// Returns the virtual address of the next lowest page table: + /// if `self` is a P4-level `Table`, then this returns a P3-level `Table`, + /// and so on for P3 -> P3 and P2 -> P1. + fn next_table_address(&self, index: usize, access: NextLevelAccess) -> Option { + let pte_flags = self[index].flags(); + + #[cfg(target_arch = "aarch64")] + let is_huge = false; + + #[cfg(target_arch = "x86_64")] + let is_huge = pte_flags.is_huge(); + + if pte_flags.is_valid() && !is_huge { + let table_address = self as *const _ as usize; + let next_table_vaddr: usize = match access { + NextLevelAccess::Recursive => (table_address << 9) | (index << PAGE_SHIFT), + NextLevelAccess::Identity => self[index].pointed_frame().unwrap().start_address().value(), + }; + Some(VirtualAddress::new_canonical(next_table_vaddr)) + } else { + None + } + } + + /// Returns a reference to the next lowest-level page table. + /// + /// A convenience wrapper around `next_table_address()`; see that method for more. + pub(crate) fn next_table(&self, index: usize, access: NextLevelAccess) -> Option<&Table> { + // convert the next table address from a raw pointer back to a Table type + self.next_table_address(index, access).map(|vaddr| unsafe { &*(vaddr.value() as *const _) }) + } + + /// Returns a mutable reference to the next lowest-level page table. + /// + /// A convenience wrapper around `next_table_address()`; see that method for more. 
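+    ///
+    /// As an informal illustration of the `NextLevelAccess::Recursive` computation performed
+    /// by `next_table_address()`: the P4 table itself is reachable at
+    /// `0o177777_776_776_776_776_0000` (recursive index `0o776` at every level), so
+    /// `(table_address << 9) | (index << PAGE_SHIFT)` for P4 entry `i` yields
+    /// `0o177777_776_776_776_iii_0000` (where `iii` is `i` in octal). When walking that
+    /// address, the MMU follows the recursive entry three times and then entry `i`,
+    /// landing on the frame of the P3 table referenced by that P4 entry.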
+ pub(crate) fn next_table_mut(&mut self, index: usize, access: NextLevelAccess) -> Option<&mut Table> { + self.next_table_address(index, access).map(|vaddr| unsafe { &mut *(vaddr.value() as *mut _) }) + } + + /// Returns a mutable reference to the next lowest-level page table, + /// creating and initializing a new one if it doesn't already exist. + /// + /// A convenience wrapper around `next_table_address()`; see that method for more. + pub(crate) fn next_table_create( + &mut self, + index: usize, + flags: PteFlagsArch, + access: NextLevelAccess, + ) -> Result<&mut Table, &'static str> { + if self.next_table(index, access).is_none() { + // commenting until we understand how huge pages work on aarch64 + // assert!(!self[index].flags().is_huge(), "mapping code does not support huge pages"); + + let af = frame_allocator::allocate_frames(1).ok_or("next_table_create(): no frames available")?; + self[index].set_entry(af.as_allocated_frame(), flags.writable(true).valid(true)); + let table = self.next_table_mut(index, access).unwrap(); + table.zero(); + core::mem::forget(af); // we currently forget frames allocated as page table frames since we don't yet have a way to track them. + } + Ok(self.next_table_mut(index, access).unwrap()) + } +} + +impl Index for Table { + type Output = PageTableEntry; + + fn index(&self, index: usize) -> &PageTableEntry { + &self.entries[index] + } +} + +impl IndexMut for Table { + fn index_mut(&mut self, index: usize) -> &mut PageTableEntry { + &mut self.entries[index] + } +} + +pub trait TableLevel {} + +pub enum Level4 {} +#[allow(dead_code)] +pub enum Level3 {} +#[allow(dead_code)] +pub enum Level2 {} +pub enum Level1 {} + +impl TableLevel for Level4 {} +impl TableLevel for Level3 {} +impl TableLevel for Level2 {} +impl TableLevel for Level1 {} + +pub trait HierarchicalLevel: TableLevel { + type NextLevel: TableLevel; +} + +impl HierarchicalLevel for Level4 { + type NextLevel = Level3; +} + +impl HierarchicalLevel for Level3 { + type NextLevel = Level2; +} + +impl HierarchicalLevel for Level2 { + type NextLevel = Level1; +} diff --git a/aarch64/kernel/memory/src/paging/temporary_page.rs b/aarch64/kernel/memory/src/paging/temporary_page.rs new file mode 100644 index 0000000000..aca853eda1 --- /dev/null +++ b/aarch64/kernel/memory/src/paging/temporary_page.rs @@ -0,0 +1,97 @@ +// Copyright 2016 Philipp Oppermann. See the README.md +// file at the top-level directory of this distribution. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use core::mem::ManuallyDrop; +use log::error; +use super::{ + AllocatedPages, AllocatedFrames, PageTable, MappedPages, VirtualAddress, + table::{Table, Level1, NextLevelAccess}, +}; +use pte_flags::PteFlagsArch; +use kernel_config::memory::{TEMPORARY_PAGE_VIRT_ADDR, PAGE_SIZE}; +use owned_borrowed_trait::Owned; + + +/// A page that can be temporarily mapped to the recursive page table frame, +/// used for purposes of editing a top-level (P4) page table itself. +/// +/// See how recursive paging works: +/// +/// # Note +/// Instead of just dropping a `TemporaryPage` object, +/// it should be cleaned up (reclaimed) using [`TemporaryPage::unmap_into_parts()`]. 
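+///
+/// # Usage sketch
+/// An illustrative outline of the pattern used by `PageTable::new_table()`; names like
+/// `new_p4_frame` and `current_page_table` are placeholders, and error handling is elided:
+/// ```ignore
+/// // Map a newly-allocated P4 frame so it can be initialized, zero it,
+/// // install its recursive entry, and then reclaim the page and frame.
+/// let mut temp = TemporaryPage::create_and_map_table_frame(None, new_p4_frame, current_page_table)?;
+/// temp.with_table_and_frame(|table, frame| {
+///     table.zero();
+///     table[RECURSIVE_P4_INDEX].set_entry(
+///         frame.as_allocated_frame(),
+///         PteFlagsArch::new().valid(true).writable(true),
+///     );
+/// })?;
+/// let (_page, initialized_p4_frame) = temp.unmap_into_parts(current_page_table)?;
+/// ```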
+ +pub struct TemporaryPage { + mapped_page: MappedPages, + /// `ManuallyDrop` is required here in order to prevent this `AllocatedFrames` + /// from being dropped twice: once when unmapping the above `mapped_page`, and + /// once when dropping this `TemporaryPage`. + /// This is because the `AllocatedFrames` object here is the same one that is + /// mapped by the above `mapped_page`. + frame: ManuallyDrop, +} + +impl TemporaryPage { + /// Creates a new [`TemporaryPage`] and maps it to the given page table `frame` + /// in the active table. + /// + /// # Arguments + /// * `page`: the optional page that will be used for the temporary mapping. + /// If `None`, a new page will be allocated. + /// * `frame`: the single frame containing the page table that we want to modify, + /// which will be mapped to this [`TemporaryPage`]. + /// * `page_table`: the currently active [`PageTable`]. + pub fn create_and_map_table_frame( + mut page: Option, + frame: AllocatedFrames, + page_table: &mut PageTable, + ) -> Result { + let mut vaddr = VirtualAddress::new_canonical(TEMPORARY_PAGE_VIRT_ADDR); + while page.is_none() && vaddr.value() != 0 { + page = page_allocator::allocate_pages_at(vaddr, 1).ok(); + vaddr -= PAGE_SIZE; + } + let (mapped_page, frame) = page_table.internal_map_to( + page.ok_or("Couldn't allocate a new Page for the temporary P4 table frame")?, + Owned(frame), + PteFlagsArch::new().valid(true).writable(true), + NextLevelAccess::Recursive, + )?; + Ok(TemporaryPage { + mapped_page, + frame: ManuallyDrop::new(frame), + }) + } + + /// Invokes the given closure `f` with a mutable reference to the root P4 page table + /// `Table` and `AllocatedFrame` held in this `TemporaryPage`. + pub fn with_table_and_frame( + &mut self, + f: F, + ) -> Result + where F: FnOnce(&mut Table, &AllocatedFrames) -> R + { + let res = f( + self.mapped_page.as_type_mut(0)?, + &self.frame, + ); + Ok(res) + } + + /// Call this to clean up a `TemporaryPage` instead of just letting it be dropped. + /// + /// A simple wrapper around [`MappedPages::unmap_into_parts()`]. + pub fn unmap_into_parts(mut self, page_table: &mut PageTable) -> Result<(AllocatedPages, Option), &'static str> { + let mp = core::mem::replace(&mut self.mapped_page, MappedPages::empty()); + mp.unmap_into_parts(page_table).map_err(|e_mp| { + error!("TemporaryPage::unmap_into_parts(): failed to unmap internal {:?}", e_mp); + "BUG: TemporaryPage::unmap_into_parts(): failed to unmap internal MappedPages into its parts." + }) + } +} diff --git a/aarch64/kernel/memory_aarch64/Cargo.toml b/aarch64/kernel/memory_aarch64/Cargo.toml new file mode 100644 index 0000000000..9bfb0194aa --- /dev/null +++ b/aarch64/kernel/memory_aarch64/Cargo.toml @@ -0,0 +1,11 @@ +[package] +authors = ["Kevin Boos ", "Nathan Royer "] +name = "memory_aarch64" +description = "The memory subsystem interfaces on aarch64." +version = "0.1.0" + +[dependencies] +cortex-a = "7.5.0" +tock-registers = "0.7.0" + +memory_structs = { path = "../../../kernel/memory_structs" } diff --git a/aarch64/kernel/memory_aarch64/src/lib.rs b/aarch64/kernel/memory_aarch64/src/lib.rs new file mode 100644 index 0000000000..41e0c3653f --- /dev/null +++ b/aarch64/kernel/memory_aarch64/src/lib.rs @@ -0,0 +1,198 @@ +//! This crate implements the virtual memory subsystem interfaces for Theseus on aarch64. +//! `memory` uses this crate to get the memory layout and do other arch-specific operations on aarch64. +//! +//! This is the top-level arch-specific memory crate. +//! 
All arch-specific definitions for memory system are exported from this crate. + +#![no_std] +#![feature(ptr_internals)] +#![feature(unboxed_closures)] + +extern crate memory_structs; +extern crate cortex_a; +extern crate tock_registers; + +use cortex_a::asm::barrier; +use cortex_a::registers::*; +use tock_registers::interfaces::Writeable; +use tock_registers::interfaces::ReadWriteable; + +use memory_structs::PhysicalAddress; + +#[cfg(any(target_arch = "aarch64", doc))] +use { + core::arch::asm, + memory_structs::VirtualAddress, +}; + +const THESEUS_ASID: u16 = 0; + +#[cfg(any(target_arch = "aarch64", doc))] +/// Flushes the specific virtual address in TLB. +/// +/// TLBI => tlb invalidate instruction +/// "va" => all translations at execution level +/// using the supplied address +/// "e1" => execution level +pub fn tlb_flush_virt_addr(vaddr: VirtualAddress) { + #[cfg(target_arch = "aarch64")] + unsafe { asm!("tlbi vae1, {}", in(reg) vaddr.value()) }; +} + +#[cfg(any(target_arch = "aarch64", doc))] +/// Flushes all TLB entries with Theseus' ASID (=0). +/// +/// TLBI => tlb invalidate instruction +/// "asid" => all entries with specific ASID +/// "e1" => execution level +pub fn tlb_flush_by_theseus_asid() { + #[cfg(target_arch = "aarch64")] + unsafe { asm!("tlbi aside1, {:x}", in(reg) THESEUS_ASID) }; +} + +#[cfg(any(target_arch = "aarch64", doc))] +pub use tlb_flush_by_theseus_asid as tlb_flush_all; + +/// Returns the current top-level page table address. +/// +/// We use TTBR0 in Theseus to store the +/// top-level page table, so this function +/// reads that register. +pub fn get_p4() -> PhysicalAddress { + PhysicalAddress::new_canonical( + TTBR0_EL1.get_baddr() as usize + ) +} + +/// Disable the MMU using aarch64 registers +/// +/// This uses the `SCTLR_EL1` register. +/// +/// When the MMU is disabled, the CPU acts as +/// if a full-address-space identity mapping +/// was active. +pub fn disable_mmu() { + SCTLR_EL1.modify(SCTLR_EL1::M::Disable); + unsafe { barrier::isb(barrier::SY) }; +} + +/// Enable the MMU using aarch64 registers +/// +/// This uses the `SCTLR_EL1` register. +/// +/// When the MMU is disabled, the CPU acts as +/// if a full-address-space identity mapping +/// was active. When it's enabled, the TTB0_EL1 +/// register is expected to point to a valid +/// page table (using its physical address). +pub fn enable_mmu() { + SCTLR_EL1.modify(SCTLR_EL1::M::Enable); + unsafe { barrier::isb(barrier::SY) }; +} + +/// Configures paging for Theseus. +pub fn configure_translation_registers() { + unsafe { + // The MAIR register holds up to 8 memory profiles; + // each profile describes cacheability of the memory. + // In Theseus, we currently use two profiles: one for + // device memory (non-cacheable) and one for normal + // memory (the usual RAM memory, cacheable). + // + // For more information on MAIR, See section D17.2.97 + // of [DDI0487l.a](https://l0.pm/arm-ddi0487l.a.pdf). + MAIR_EL1.write( + // Attribute 1 - Device. + MAIR_EL1::Attr1_Device::nonGathering_nonReordering_EarlyWriteAck + + + // Attribute 0 - Cacheable normal DRAM. + MAIR_EL1::Attr0_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc + + MAIR_EL1::Attr0_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc, + ); + + // The translation control register contains most of + // parameters for address translation; these parameters + // notably define the format for page table entry flags. + TCR_EL1.write( + // TCR_EL1::DS is not exposed by `cortex_a`. 
+ // By default (cleared), this means we cannot + // use the 52-bit output-address mode. + + // Whether to use the top byte of virtual + // addresses for tagged addresses ("Ignored") + // or to use it in the page table walk ("Used"). + // With our four-level paging, however, the top byte + // is not used for page table walks anyway. + TCR_EL1::TBI0::Used + // | TCR_EL1::TBI1::Used + + // Translation Granule Size = Page Size + // => four kilobytes + + TCR_EL1::TG0::KiB_4 + // + TCR_EL1::TG1::KiB_4 + + // These fields could only be used if we had access + // to the DS field, and if we set it to one. Indeed, + // when DS=1, the shareability fields of page + // descriptors are replaced by some bits of the output + // address; the shareability then becomes constant for each + // whole page table: one setting for TTBR0 and one for TTBR1. + // + TCR_EL1::SH0::Inner + // + TCR_EL1::SH1::Inner + + // ASID Size. The upper 8 bits of TTBR0_EL1 and + // TTBR1_EL1 are ignored by hardware for every + // purpose except reading back the register, and + // are treated as zero when used for allocating + // and matching entries in the TLB. + + TCR_EL1::AS::ASID8Bits + + // We currently output 48 bits of physical + // address on aarch64. + + TCR_EL1::IPS::Bits_48 + + // Translation table walk disable. This bit controls + // whether a translation table walk is performed on + // a TLB miss. + + TCR_EL1::EPD0::EnableTTBR0Walks + // + TCR_EL1::EPD1::EnableTTBR1Walks + + // Selects which TTBR defines the ASID, which is used when + // comparing the current address space with that of an address. + + TCR_EL1::A1::TTBR0 + + // Controls the size of the memory region addressed + // by page table walks. We have to write 64 - (max + // number of bits in a region address): 64 - 48 = 16. + + TCR_EL1::T0SZ.val(16) + // + TCR_EL1::T1SZ.val(16) + + // These fields set the cacheability of the memory accesses that + // the MMU performs during translation table walks via TTBR0/TTBR1. + // Since they are not written here, they end up as zero, + // i.e., table walks are treated as non-cacheable. + // + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + // + TCR_EL1::ORGN1::NonCacheable + // + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + // + TCR_EL1::IRGN1::NonCacheable + + // Allow the MMU to update the ACCESSED flag. + + TCR_EL1::HA::Enable + + // Allow the MMU to update the DIRTY flag.
+ + TCR_EL1::HD::Enable + ); + + barrier::isb(barrier::SY); + } +} + +/// Installs the given page table by writing its physical address +/// into the `TTBR0_EL1` register, making it the active top-level page table. +pub fn set_as_active_page_table_root(page_table: PhysicalAddress) { + unsafe { + let page_table_addr = page_table.value() as u64; + TTBR0_EL1.write( + TTBR0_EL1::ASID.val(THESEUS_ASID as u64) + + TTBR0_EL1::BADDR.val(page_table_addr >> 1) + ); + + barrier::isb(barrier::SY); + } +} diff --git a/aarch64/kernel/nano_core/Cargo.toml b/aarch64/kernel/nano_core/Cargo.toml index 0251b5e6fe..8bf3aaca01 100644 --- a/aarch64/kernel/nano_core/Cargo.toml +++ b/aarch64/kernel/nano_core/Cargo.toml @@ -1,4 +1,6 @@ [package] +authors = ["Kevin Boos ", "Nathan Royer "] +description = "The code executed by a bootloader to start Theseus" name = "nano_core" version = "0.1.0" edition = "2021" @@ -15,6 +17,8 @@ frame_allocator = { path = "../../../kernel/frame_allocator" } page_allocator = { path = "../../../kernel/page_allocator" } memory_structs = { path = "../../../kernel/memory_structs" } kernel_config = { path = "../../../kernel/kernel_config" } +pte_flags = { path = "../../../kernel/pte_flags" } +memory = { path = "../memory" } uefi = { version = "0.18", default-features = false, features = [ "alloc", "exts" ] } uefi-services = { version = "0.15", default-features = false, features = [ "panic_handler" ] } diff --git a/aarch64/kernel/nano_core/main.rs b/aarch64/kernel/nano_core/main.rs index 5057224cfe..12302a635d 100644 --- a/aarch64/kernel/nano_core/main.rs +++ b/aarch64/kernel/nano_core/main.rs @@ -8,16 +8,19 @@ extern crate logger; extern crate frame_allocator; extern crate page_allocator; extern crate memory_structs; +extern crate memory; extern crate kernel_config; use alloc::vec; use core::arch::asm; +use alloc::vec::Vec; -use uefi::{prelude::entry, Status, Handle, table::{SystemTable, Boot}}; +use uefi::{prelude::entry, Status, Handle, table::{SystemTable, Boot, boot::MemoryType}}; -use frame_allocator::{PhysicalMemoryRegion, MemoryRegionType}; -use memory_structs::{VirtualAddress, PhysicalAddress, Frame, FrameRange}; +use frame_allocator::MemoryRegionType; +use memory_structs::{PhysicalAddress, FrameRange}; use kernel_config::memory::PAGE_SIZE; +use pte_flags::PteFlags; use log::{info, error}; @@ -47,48 +50,47 @@ fn main( let mmap_size = boot_svc.memory_map_size(); let mut mmap = vec![0; mmap_size.map_size + safety * mmap_size.entry_size]; + let mut layout_len = 0; + { + let (_, layout) = boot_svc.memory_map(&mut mmap).unwrap(); + for descriptor in layout { + if descriptor.ty != MemoryType::CONVENTIONAL && descriptor.page_count > 0 { + layout_len += 1; + } + } + } + let mut layout_vec = Vec::with_capacity(layout_len + safety); + let (_runtime_svc, mem_iter) = system_table.exit_boot_services(handle, &mut mmap) .map_err(|_| "nano_core::main - couldn't exit uefi boot services")?;
- let mut free_regions: [Option<PhysicalMemoryRegion>; 32] = Default::default(); - let mut free_index = 0; - let mut reserved_regions: [Option<PhysicalMemoryRegion>; 32] = Default::default(); - let mut reserved_index = 0; - for descriptor in mem_iter { let page_count = descriptor.page_count as usize; let size = page_count * PAGE_SIZE; if size > 0 { - let region_type = uefi_conv::convert_mem(descriptor.ty); - let (dst, index) = match region_type { - MemoryRegionType::Free => (&mut free_regions, &mut free_index), - MemoryRegionType::Reserved => (&mut reserved_regions, &mut reserved_index), - MemoryRegionType::Unknown => continue, - }; + let mem_type = uefi_conv::convert_mem(descriptor.ty); + let flags = uefi_conv::get_mem_flags(descriptor.ty); let start_addr = descriptor.phys_start as usize; let start_addr = PhysicalAddress::new_canonical(start_addr); let range = FrameRange::from_phys_addr(start_addr, size); - let region = PhysicalMemoryRegion::new(range, region_type); - dst[*index] = Some(region); - *index += 1; + layout_vec.push((range, mem_type, flags)); } } - frame_allocator::init(free_regions.iter().flatten(), reserved_regions.iter().flatten())?; - info!("Initialized new frame allocator!"); - frame_allocator::dump_frame_allocator_state(); - - // On x86_64 `page_allocator` is initialized with a value obtained - // from the ELF layout. Here I'm choosing a value which is probably - // valid (uneducated guess); once we have an ELF aarch64 kernel - // we'll be able to use the original limit defined with KERNEL_OFFSET - // and the ELF layout. - page_allocator::init(VirtualAddress::new_canonical(0x100_000_000))?; - info!("Initialized new page allocator!"); - page_allocator::dump_page_allocator_state(); + // This utility function is also used for GIC MMIO mapping. + let mmio_region = |phys_addr, num_pages| { + let phys_addr = PhysicalAddress::new(phys_addr).unwrap(); + let range = FrameRange::from_phys_addr(phys_addr, num_pages); + let flags = PteFlags::DEVICE_MEMORY | PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE; + (range, MemoryRegionType::Free, Some(flags)) + }; + + layout_vec.push(mmio_region(0x0900_0000, 1)); + + info!("Calling memory::init();"); + info!("page table: {:?}", memory::init(&layout_vec)); info!("Going to infinite loop now."); inf_loop_0xbeef(); diff --git a/aarch64/kernel/nano_core/uefi_conv.rs b/aarch64/kernel/nano_core/uefi_conv.rs index 169305310c..fe27e3be87 100644 --- a/aarch64/kernel/nano_core/uefi_conv.rs +++ b/aarch64/kernel/nano_core/uefi_conv.rs @@ -1,5 +1,6 @@ use frame_allocator::MemoryRegionType::{self, *}; use uefi::table::boot::MemoryType; +use pte_flags::PteFlags; pub fn convert_mem(uefi: MemoryType) -> MemoryRegionType { match uefi { @@ -57,3 +58,61 @@ pub fn convert_mem(uefi: MemoryType) -> MemoryRegionType { _ => Unknown, } } + +pub fn get_mem_flags(uefi: MemoryType) -> Option<PteFlags> { + match uefi { + // This enum variant is not used. + MemoryType::RESERVED => None, + + // The code portions of a loaded UEFI application. + // We get permission faults if this isn't writable. + MemoryType::LOADER_CODE => Some(PteFlags::WRITABLE), + + // The data portions of a loaded UEFI application, + // as well as any memory allocated by it. + MemoryType::LOADER_DATA => Some(PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // Code of the boot drivers. + // Can be reused after the OS is loaded. + // We get permission faults if this isn't writable. + MemoryType::BOOT_SERVICES_CODE => Some(PteFlags::WRITABLE), + + // Memory used to store boot drivers' data. + // Can be reused after the OS is loaded.
+ MemoryType::BOOT_SERVICES_DATA => Some(PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // Runtime drivers' code. + // We get permission faults if this isn't writable. + MemoryType::RUNTIME_SERVICES_CODE => Some(PteFlags::WRITABLE), + + // Runtime services' data. + MemoryType::RUNTIME_SERVICES_DATA => Some(PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // Free usable memory. + MemoryType::CONVENTIONAL => None, + + // Memory in which errors have been detected. + MemoryType::UNUSABLE => None, + + // Memory that holds ACPI tables. + // Can be reclaimed after they are parsed. + MemoryType::ACPI_RECLAIM => Some(PteFlags::DEVICE_MEMORY | PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // Firmware-reserved addresses. + MemoryType::ACPI_NON_VOLATILE => Some(PteFlags::DEVICE_MEMORY | PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // A region used for memory-mapped I/O. + MemoryType::MMIO => Some(PteFlags::DEVICE_MEMORY | PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // Address space used for memory-mapped port I/O. + MemoryType::MMIO_PORT_SPACE => Some(PteFlags::DEVICE_MEMORY | PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // Address space which is part of the processor. + MemoryType::PAL_CODE => Some(PteFlags::DEVICE_MEMORY | PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + // Memory region which is usable and is also non-volatile. + MemoryType::PERSISTENT_MEMORY => Some(PteFlags::DEVICE_MEMORY | PteFlags::NOT_EXECUTABLE | PteFlags::WRITABLE), + + _ => None, + } +} diff --git a/kernel/memory_structs/src/lib.rs b/kernel/memory_structs/src/lib.rs index 1133cb38f8..30be348bb3 100644 --- a/kernel/memory_structs/src/lib.rs +++ b/kernel/memory_structs/src/lib.rs @@ -164,20 +164,31 @@ mod canonical_address { mod canonical_address { use bit_field::BitField; - // aarch64 doesn't have a concept of canonical VA - // so this always returns true + /// On aarch64, VAs can include an ASID, + /// which is 8 or 16 bits long depending + /// on the MMU config. In Theseus, we use 8 bits, + /// and the next 8 bits are unused. + /// Our ASID is zero, so a "canonical" VA has + /// the 16 most significant bits cleared. #[inline] - pub fn is_canonical_virtual_address(_virt_addr: usize) -> bool { - true + pub fn is_canonical_virtual_address(virt_addr: usize) -> bool { + match virt_addr.get_bits(48..64) { + 0 => true, + _ => false, + } } - - // aarch64 doesn't have a concept of canonical VA - // so this returns the address as-is + + /// On aarch64, VAs can include an ASID, + /// which is 8 or 16 bits long depending + /// on the MMU config. In Theseus, we use 8 bits, + /// and the next 8 bits are unused. + /// Our ASID is zero, so a "canonical" VA has + /// the 16 most significant bits cleared. #[inline] pub const fn canonicalize_virtual_address(virt_addr: usize) -> usize { - virt_addr + virt_addr & 0x0000_FFFF_FFFF_FFFF } - + /// On aarch64, we configure the MMU to use 48-bit /// physical addresses; "canonical" physical addresses /// have the 16 most significant bits cleared. @@ -188,7 +199,7 @@ mod canonical_address { _ => false, } } - + /// On aarch64, we configure the MMU to use 48-bit /// physical addresses; "canonical" physical addresses /// have the 16 most significant bits cleared.
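For reference, the aarch64 canonicalization behavior introduced above can be exercised with a minimal standalone sketch. This is not the `memory_structs` crate's code: it uses plain `u64` values and bit masks (no `bit_field` dependency), and the function names merely mirror the ones in the diff.

    /// A VA is "canonical" when its 16 most significant bits are clear.
    fn is_canonical_virtual_address(virt_addr: u64) -> bool {
        virt_addr >> 48 == 0
    }

    /// Clearing the top 16 bits canonicalizes any address.
    const fn canonicalize_virtual_address(virt_addr: u64) -> u64 {
        virt_addr & 0x0000_FFFF_FFFF_FFFF
    }

    fn main() {
        // An address with bits set in the top 16 bits is not canonical...
        let tagged: u64 = 0xFF00_0000_DEAD_B000;
        assert!(!is_canonical_virtual_address(tagged));

        // ...but canonicalizing it clears exactly those bits.
        let canonical = canonicalize_virtual_address(tagged);
        assert_eq!(canonical, 0x0000_0000_DEAD_B000);
        assert!(is_canonical_virtual_address(canonical));
    }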
diff --git a/kernel/pte_flags/src/pte_flags_aarch64.rs b/kernel/pte_flags/src/pte_flags_aarch64.rs index 7f25dd6e9e..ebb4f8340d 100644 --- a/kernel/pte_flags/src/pte_flags_aarch64.rs +++ b/kernel/pte_flags/src/pte_flags_aarch64.rs @@ -368,7 +368,7 @@ impl PteFlagsAarch64 { /// for use in a higher-level page table entry, e.g., P4, P3, P2. /// /// Currently, on aarch64, this does the following: - /// * Clears the `NOT_EXECUTABLE` bit. + /// * Clears the `NOT_EXECUTABLE` bit, making it executable. /// * P4, P3, and P2 entries should never set `NOT_EXECUTABLE`, /// only the lowest-level P1 entry should. /// * Clears the `EXCLUSIVE` bit. @@ -376,11 +376,17 @@ impl PteFlagsAarch64 { /// because another page table frame may re-use it (create another alias to it) /// without our page table implementation knowing about it. /// * Only P1-level PTEs can map a frame exclusively. + /// * Sets the `ACCESSED` bit, since Theseus currently does not use it + /// and aarch64 will throw an Access Flag Fault if it is not set. + /// * Sets the `PAGE_DESCRIPTOR` bit, since Theseus currently does not + /// use block descriptors on aarch64. /// * Sets the `VALID` bit, as every P4, P3, and P2 entry must be valid. #[must_use] pub fn adjust_for_higher_level_pte(self) -> Self { self.executable(true) .exclusive(false) + .accessed(true) + .page_descriptor(true) .valid(true) }
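To make the flag adjustments documented in `adjust_for_higher_level_pte()` concrete, here is a minimal, self-contained sketch that models them on a raw `u64` descriptor. It is not the actual `PteFlagsAarch64` implementation: the bit positions follow the ARMv8 stage-1 descriptor format (VALID at bit 0, table/page descriptor at bit 1, AF at bit 10, PXN/UXN at bits 53/54), while the `EXCLUSIVE` position is assumed to sit in one of the software-defined bits for illustration only.

    // Toy descriptor bits (assumed layout, see lead-in above).
    const VALID: u64           = 1 << 0;             // descriptor is valid
    const PAGE_DESCRIPTOR: u64 = 1 << 1;             // table/page (not block) descriptor
    const ACCESSED: u64        = 1 << 10;            // AF: access flag
    const NOT_EXECUTABLE: u64  = (1 << 53) | (1 << 54); // PXN | UXN
    const EXCLUSIVE: u64       = 1 << 55;            // assumed software-defined "exclusively mapped" bit

    /// Mirrors the documented steps: make the entry executable and non-exclusive,
    /// then set ACCESSED, PAGE_DESCRIPTOR, and VALID.
    fn adjust_for_higher_level_pte(flags: u64) -> u64 {
        (flags & !NOT_EXECUTABLE & !EXCLUSIVE)
            | ACCESSED
            | PAGE_DESCRIPTOR
            | VALID
    }

    fn main() {
        // Flags that would be typical for a lowest-level (P1) entry.
        let p1_flags = VALID | NOT_EXECUTABLE | EXCLUSIVE | ACCESSED;

        // Adjusted for use in a P4/P3/P2 entry.
        let higher = adjust_for_higher_level_pte(p1_flags);
        assert_eq!(higher & NOT_EXECUTABLE, 0);
        assert_eq!(higher & EXCLUSIVE, 0);
        assert_eq!(higher, VALID | PAGE_DESCRIPTOR | ACCESSED);
    }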