diff --git a/Cargo.lock b/Cargo.lock index d6fc011311..7be2c2ae98 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -330,7 +330,6 @@ dependencies = [ "raw-cpuid 11.0.1", "riscv 0.10.1", "sbi-rt", - "smccc", "spin 0.9.8", "spinlock", "static_assertions", @@ -380,6 +379,7 @@ name = "axruntime" version = "0.1.0" dependencies = [ "aarch64-cpu", + "arm_gic", "axalloc", "axconfig", "axdisplay", @@ -400,6 +400,7 @@ dependencies = [ "page_table_entry", "percpu", "raw-cpuid 11.0.1", + "smccc", "spin 0.9.8", "spinlock", "tock-registers", diff --git a/apps/hv/guest/linux/linux-aarch64.dts b/apps/hv/guest/linux/linux-aarch64.dts index 423045ff69..fb1274e118 100644 --- a/apps/hv/guest/linux/linux-aarch64.dts +++ b/apps/hv/guest/linux/linux-aarch64.dts @@ -381,7 +381,7 @@ }; chosen { - linux,initrd-end = <0x7812046a>; + linux,initrd-end = <0x78736157>; linux,initrd-start = <0x78000000>; bootargs = "root=/dev/vda rootfstype=ext4 rw console=ttyAMA0"; stdout-path = "/pl011@9000000"; diff --git a/apps/hv/guest/nimbos/nimbos-aarch64_1.dtb b/apps/hv/guest/nimbos/nimbos-aarch64_1.dtb new file mode 100644 index 0000000000..3499a338ec Binary files /dev/null and b/apps/hv/guest/nimbos/nimbos-aarch64_1.dtb differ diff --git a/apps/hv/guest/nimbos/nimbos-aarch64_1.dts b/apps/hv/guest/nimbos/nimbos-aarch64_1.dts new file mode 100644 index 0000000000..f2c13b31ab --- /dev/null +++ b/apps/hv/guest/nimbos/nimbos-aarch64_1.dts @@ -0,0 +1,388 @@ +/dts-v1/; + +/ { + interrupt-parent = <0x8002>; + model = "linux,dummy-virt"; + #size-cells = <0x02>; + #address-cells = <0x02>; + compatible = "linux,dummy-virt"; + + psci { + migrate = <0xc4000005>; + cpu_on = <0xc4000003>; + cpu_off = <0x84000002>; + cpu_suspend = <0xc4000001>; + method = "hvc"; + compatible = "arm,psci-1.0\0arm,psci-0.2\0arm,psci"; + }; + + memory@40000000 { + reg = <0x00 0x50000000 0x00 0x8000000>; + device_type = "memory"; + }; + + platform-bus@c000000 { + interrupt-parent = <0x8002>; + ranges = <0x00 0x00 0xc000000 0x2000000>; + #address-cells = <0x01>; + #size-cells = <0x01>; + compatible = "qemu,platform\0simple-bus"; + }; + + fw-cfg@9020000 { + dma-coherent; + reg = <0x00 0x9020000 0x00 0x18>; + compatible = "qemu,fw-cfg-mmio"; + }; + + virtio_mmio@a000000 { + dma-coherent; + interrupts = <0x00 0x10 0x01>; + reg = <0x00 0xa000000 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a000200 { + dma-coherent; + interrupts = <0x00 0x11 0x01>; + reg = <0x00 0xa000200 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a000400 { + dma-coherent; + interrupts = <0x00 0x12 0x01>; + reg = <0x00 0xa000400 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a000600 { + dma-coherent; + interrupts = <0x00 0x13 0x01>; + reg = <0x00 0xa000600 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a000800 { + dma-coherent; + interrupts = <0x00 0x14 0x01>; + reg = <0x00 0xa000800 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a000a00 { + dma-coherent; + interrupts = <0x00 0x15 0x01>; + reg = <0x00 0xa000a00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a000c00 { + dma-coherent; + interrupts = <0x00 0x16 0x01>; + reg = <0x00 0xa000c00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a000e00 { + dma-coherent; + interrupts = <0x00 0x17 0x01>; + reg = <0x00 0xa000e00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001000 { + dma-coherent; + interrupts = <0x00 0x18 0x01>; + reg = <0x00 0xa001000 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001200 { + 
dma-coherent; + interrupts = <0x00 0x19 0x01>; + reg = <0x00 0xa001200 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001400 { + dma-coherent; + interrupts = <0x00 0x1a 0x01>; + reg = <0x00 0xa001400 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001600 { + dma-coherent; + interrupts = <0x00 0x1b 0x01>; + reg = <0x00 0xa001600 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001800 { + dma-coherent; + interrupts = <0x00 0x1c 0x01>; + reg = <0x00 0xa001800 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001a00 { + dma-coherent; + interrupts = <0x00 0x1d 0x01>; + reg = <0x00 0xa001a00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001c00 { + dma-coherent; + interrupts = <0x00 0x1e 0x01>; + reg = <0x00 0xa001c00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a001e00 { + dma-coherent; + interrupts = <0x00 0x1f 0x01>; + reg = <0x00 0xa001e00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002000 { + dma-coherent; + interrupts = <0x00 0x20 0x01>; + reg = <0x00 0xa002000 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002200 { + dma-coherent; + interrupts = <0x00 0x21 0x01>; + reg = <0x00 0xa002200 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002400 { + dma-coherent; + interrupts = <0x00 0x22 0x01>; + reg = <0x00 0xa002400 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002600 { + dma-coherent; + interrupts = <0x00 0x23 0x01>; + reg = <0x00 0xa002600 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002800 { + dma-coherent; + interrupts = <0x00 0x24 0x01>; + reg = <0x00 0xa002800 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002a00 { + dma-coherent; + interrupts = <0x00 0x25 0x01>; + reg = <0x00 0xa002a00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002c00 { + dma-coherent; + interrupts = <0x00 0x26 0x01>; + reg = <0x00 0xa002c00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a002e00 { + dma-coherent; + interrupts = <0x00 0x27 0x01>; + reg = <0x00 0xa002e00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003000 { + dma-coherent; + interrupts = <0x00 0x28 0x01>; + reg = <0x00 0xa003000 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003200 { + dma-coherent; + interrupts = <0x00 0x29 0x01>; + reg = <0x00 0xa003200 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003400 { + dma-coherent; + interrupts = <0x00 0x2a 0x01>; + reg = <0x00 0xa003400 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003600 { + dma-coherent; + interrupts = <0x00 0x2b 0x01>; + reg = <0x00 0xa003600 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003800 { + dma-coherent; + interrupts = <0x00 0x2c 0x01>; + reg = <0x00 0xa003800 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003a00 { + dma-coherent; + interrupts = <0x00 0x2d 0x01>; + reg = <0x00 0xa003a00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003c00 { + dma-coherent; + interrupts = <0x00 0x2e 0x01>; + reg = <0x00 0xa003c00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + virtio_mmio@a003e00 { + dma-coherent; + interrupts = <0x00 0x2f 0x01>; + reg = <0x00 0xa003e00 0x00 0x200>; + compatible = "virtio,mmio"; + }; + + gpio-keys { + compatible = "gpio-keys"; + + poweroff { + gpios = <0x8004 0x03 0x00>; + linux,code = <0x74>; + label = "GPIO Key Poweroff"; + }; + }; + + pl061@9030000 { + phandle = <0x8004>; + 
clock-names = "apb_pclk"; + clocks = <0x8000>; + interrupts = <0x00 0x07 0x04>; + gpio-controller; + #gpio-cells = <0x02>; + compatible = "arm,pl061\0arm,primecell"; + reg = <0x00 0x9030000 0x00 0x1000>; + }; + + pcie@10000000 { + interrupt-map-mask = <0x1800 0x00 0x00 0x07>; + interrupt-map = <0x00 0x00 0x00 0x01 0x8002 0x00 0x00 0x00 0x03 0x04 0x00 0x00 0x00 0x02 0x8002 0x00 0x00 0x00 0x04 0x04 0x00 0x00 0x00 0x03 0x8002 0x00 0x00 0x00 0x05 0x04 0x00 0x00 0x00 0x04 0x8002 0x00 0x00 0x00 0x06 0x04 0x800 0x00 0x00 0x01 0x8002 0x00 0x00 0x00 0x04 0x04 0x800 0x00 0x00 0x02 0x8002 0x00 0x00 0x00 0x05 0x04 0x800 0x00 0x00 0x03 0x8002 0x00 0x00 0x00 0x06 0x04 0x800 0x00 0x00 0x04 0x8002 0x00 0x00 0x00 0x03 0x04 0x1000 0x00 0x00 0x01 0x8002 0x00 0x00 0x00 0x05 0x04 0x1000 0x00 0x00 0x02 0x8002 0x00 0x00 0x00 0x06 0x04 0x1000 0x00 0x00 0x03 0x8002 0x00 0x00 0x00 0x03 0x04 0x1000 0x00 0x00 0x04 0x8002 0x00 0x00 0x00 0x04 0x04 0x1800 0x00 0x00 0x01 0x8002 0x00 0x00 0x00 0x06 0x04 0x1800 0x00 0x00 0x02 0x8002 0x00 0x00 0x00 0x03 0x04 0x1800 0x00 0x00 0x03 0x8002 0x00 0x00 0x00 0x04 0x04 0x1800 0x00 0x00 0x04 0x8002 0x00 0x00 0x00 0x05 0x04>; + #interrupt-cells = <0x01>; + ranges = <0x1000000 0x00 0x00 0x00 0x3eff0000 0x00 0x10000 0x2000000 0x00 0x10000000 0x00 0x10000000 0x00 0x2eff0000 0x3000000 0x80 0x00 0x80 0x00 0x80 0x00>; + reg = <0x40 0x10000000 0x00 0x10000000>; + msi-map = <0x00 0x8003 0x00 0x10000>; + dma-coherent; + bus-range = <0x00 0xff>; + linux,pci-domain = <0x00>; + #size-cells = <0x02>; + #address-cells = <0x03>; + device_type = "pci"; + compatible = "pci-host-ecam-generic"; + }; + + pl031@9010000 { + clock-names = "apb_pclk"; + clocks = <0x8000>; + interrupts = <0x00 0x02 0x04>; + reg = <0x00 0x9010000 0x00 0x1000>; + compatible = "arm,pl031\0arm,primecell"; + }; + + pl011@9000000 { + clock-names = "uartclk\0apb_pclk"; + clocks = <0x8000 0x8000>; + interrupts = <0x00 0x01 0x04>; + reg = <0x00 0x9000000 0x00 0x1000>; + compatible = "arm,pl011\0arm,primecell"; + }; + + pmu { + interrupts = <0x01 0x07 0x104>; + compatible = "arm,armv8-pmuv3"; + }; + + intc@8000000 { + phandle = <0x8002>; + reg = <0x00 0x8000000 0x00 0x10000 0x00 0x8010000 0x00 0x10000>; + compatible = "arm,cortex-a15-gic"; + ranges; + #size-cells = <0x02>; + #address-cells = <0x02>; + interrupt-controller; + #interrupt-cells = <0x03>; + + v2m@8020000 { + phandle = <0x8003>; + reg = <0x00 0x8020000 0x00 0x1000>; + msi-controller; + compatible = "arm,gic-v2m-frame"; + }; + }; + + flash@0 { + bank-width = <0x04>; + reg = <0x00 0x00 0x00 0x4000000 0x00 0x4000000 0x00 0x4000000>; + compatible = "cfi-flash"; + }; + + cpus { + #size-cells = <0x00>; + #address-cells = <0x01>; + + cpu-map { + + socket0 { + + cluster0 { + + core0 { + cpu = <0x8001>; + }; + }; + }; + }; + + cpu@0 { + phandle = <0x8001>; + reg = <0x00>; + compatible = "arm,cortex-a72"; + device_type = "cpu"; + }; + }; + + timer { + interrupts = <0x01 0x0d 0x104 0x01 0x0e 0x104 0x01 0x0b 0x104 0x01 0x0a 0x104>; + always-on; + compatible = "arm,armv8-timer\0arm,armv7-timer"; + }; + + apb-pclk { + phandle = <0x8000>; + clock-output-names = "clk24mhz"; + clock-frequency = <0x16e3600>; + #clock-cells = <0x00>; + compatible = "fixed-clock"; + }; + + chosen { + stdout-path = "/pl011@9000000"; + rng-seed = <0x136df250 0xc3dbcc39 0xbfeda3f3 0x38a7ac60 0x5721e082 0x54ed87f6 0x1e4e2740 0x46fa7831>; + kaslr-seed = <0x5c7c8563 0xad0c2481>; + }; +}; diff --git a/apps/hv/src/main.rs b/apps/hv/src/main.rs index 09d8e73ba8..fa4eb75633 100644 --- a/apps/hv/src/main.rs +++ 
b/apps/hv/src/main.rs @@ -15,7 +15,12 @@ use aarch64_config::*; use libax::{ hv::{ self, GuestPageTable, GuestPageTableTrait, HyperCraftHalImpl, PerCpu, - Result, VCpu, VmCpus, VM, VcpusArray, VM_ARRAY, VM_MAX_NUM, is_vcpu_init_ok, is_vcpu_primary_ok, init_vm_vcpu, add_vm, add_vm_vcpu, print_vm, run_vm_vcpu, + Result, VCpu, VmCpus, VM, VcpusArray, + VM_ARRAY, VM_MAX_NUM, + add_vm, add_vm_vcpu, get_vm, print_vm, + init_vm_vcpu, init_vm_emu_device, init_vm_passthrough_device, + is_vcpu_init_ok, is_vcpu_primary_ok, + run_vm_vcpu, }, info, }; @@ -70,19 +75,21 @@ fn main(hart_id: usize) { } #[cfg(target_arch = "aarch64")] { + let vm1_kernel_entry = 0x7020_0000; + let vm1_dtb = 0x7000_0000; + // boot cpu - PerCpu::::init(0); // change to pub const CPU_STACK_SIZE: usize = PAGE_SIZE * 128? + PerCpu::::init(0); // get current percpu let percpu = PerCpu::::ptr_for_cpu(hart_id); // create vcpu, need to change addr for aarch64! - let gpt = setup_gpm(0x7000_0000, 0x7020_0000).unwrap(); + let gpt = setup_gpm(vm1_dtb, vm1_kernel_entry).unwrap(); let vcpu = percpu.create_vcpu(0, 0).unwrap(); percpu.set_active_vcpu(Some(vcpu.clone())); let vcpus = VcpusArray::new(); // add vcpu into vm - // vcpus.add_vcpu(vcpu).unwrap(); let mut vm: VM = VM::new(vcpus, gpt, 0).unwrap(); unsafe { let mut vm_array = Vec::with_capacity(VM_MAX_NUM); @@ -92,11 +99,13 @@ fn main(hart_id: usize) { VM_ARRAY.init_by(vm_array); debug!("this is VM_ARRAY: {:p}", &VM_ARRAY as *const _); } + add_vm(0, vm); - // init_vm_vcpu(0, vcpu, 0x7020_0000, 0x7000_0000); let vcpu_id = vcpu.vcpu_id; add_vm_vcpu(0, vcpu); - init_vm_vcpu(0, vcpu_id, 0x7020_0000, 0x7000_0000); + init_vm_vcpu(0, vcpu_id, vm1_kernel_entry, vm1_dtb); + init_vm_emu_device(0); + init_vm_passthrough_device(0); run_vm_vcpu(0, 0); } @@ -126,29 +135,7 @@ fn main(hart_id: usize) { panic!("Other arch is not supported yet!") } } -/* -#[cfg(target_arch = "aarch64")] -#[no_mangle] -pub extern "C" fn secondary_main_hv(cpu_id: usize) { - // info!("before sleep cpu {}", cpu_id); - // thread::sleep(Duration::from_millis(1000)); - info!("Hello World from cpu {}", cpu_id); - while !is_vcpu_primary_ok() { - core::hint::spin_loop(); - } - PerCpu::::setup_this_cpu(cpu_id); - let percpu = PerCpu::::this_cpu(); - let vcpu = percpu.create_vcpu(0, 1).unwrap(); - percpu.set_active_vcpu(Some(vcpu.clone())); - add_vm_vcpu(0, vcpu); - while !is_vcpu_init_ok() { - core::hint::spin_loop(); - } - info!("vcpu {} init ok", cpu_id); - // run_vm_vcpu(0, 1); - // print_vm(0); -} -*/ + #[cfg(target_arch = "riscv64")] pub fn setup_gpm(dtb: usize) -> Result { let mut gpt = GuestPageTable::new()?; @@ -222,6 +209,35 @@ pub fn setup_gpm(dtb: usize) -> Result { Ok(gpt) } +#[cfg(target_arch = "aarch64")] +#[no_mangle] +pub extern "C" fn secondary_vm(cpu_id: usize) ->! { + while !is_vcpu_primary_ok() { + core::hint::spin_loop(); + } + let vm2_kernel_entry = 0x5020_0000; + let vm2_dtb = 0x5000_0000; + + PerCpu::::setup_this_cpu(cpu_id); + let percpu = PerCpu::::this_cpu(); + let vcpu = percpu.create_vcpu(1, 0).unwrap(); + // create vcpu, need to change addr for aarch64! 
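+    // The sequence below follows the same per-VM bring-up path as main(), just
+    // with VM id 1 and the second guest's load addresses:
+    //   setup_gpm(dtb, kernel_entry)            -- build the stage-2 guest page table
+    //   VM::new(vcpus, gpt, vm_id)              -- create the VM object
+    //   add_vm / add_vm_vcpu                    -- register the VM and its vCPU in VM_ARRAY
+    //   init_vm_vcpu(vm_id, vcpu_id, entry, dtb)
+    //   init_vm_emu_device / init_vm_passthrough_device
+    //   run_vm_vcpu(vm_id, 0)                   -- enter the guest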
+ let gpt = setup_gpm(vm2_dtb, vm2_kernel_entry).unwrap(); + percpu.set_active_vcpu(Some(vcpu.clone())); + let vcpus = VcpusArray::new(); + // add vcpu into vm + // vcpus.add_vcpu(vcpu).unwrap(); + let mut vm: VM = VM::new(vcpus, gpt, 1).unwrap(); + + add_vm(1, vm); + let vcpu_id = vcpu.vcpu_id; + add_vm_vcpu(1, vcpu); + init_vm_vcpu(1, vcpu_id, vm2_kernel_entry, vm2_dtb); + init_vm_emu_device(1); + init_vm_passthrough_device(1); + + run_vm_vcpu(1, 0); +} #[cfg(target_arch = "aarch64")] pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { let mut gpt = GuestPageTable::new()?; @@ -244,7 +260,8 @@ pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { 0x4000, MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, )?; - + debug!("map virtio"); + if let Some(pl011) = meta.pl011 { gpt.map_region( pl011.base_address, @@ -253,6 +270,7 @@ pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, )?; } + debug!("map pl011"); if let Some(pl031) = meta.pl031 { gpt.map_region( @@ -262,7 +280,7 @@ pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, )?; } - + debug!("map pl031"); if let Some(pl061) = meta.pl061 { gpt.map_region( pl061.base_address, @@ -271,7 +289,9 @@ pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, )?; } - + debug!("map pl061"); + + /* for intc in meta.intc.iter() { gpt.map_region( intc.base_address, @@ -280,7 +300,21 @@ pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, )?; } + */ + // map gicc to gicv. the address is qemu setting, it is different from real hardware + gpt.map_region( + 0x8010000, + 0x8040000, + 0x2000, + MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, + )?; + gpt.map_region( + 0x8020000, + 0x8020000, + 0x10000, + MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, + )?; if let Some(pcie) = meta.pcie { gpt.map_region( pcie.base_address, @@ -289,6 +323,7 @@ pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, )?; } + debug!("map pcie"); for flash in meta.flash.iter() { gpt.map_region( @@ -298,29 +333,31 @@ pub fn setup_gpm(dtb: usize, kernel_entry: usize) -> Result { MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER, )?; } + debug!("map flash"); info!( "physical memory: [{:#x}: {:#x})", meta.physical_memory_offset, meta.physical_memory_offset + meta.physical_memory_size ); - gpt.map_region( meta.physical_memory_offset, meta.physical_memory_offset, meta.physical_memory_size, MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXECUTE | MappingFlags::USER, )?; - let vaddr = 0x8000000014; + debug!("map physical memeory"); + + let vaddr = 0x8010000; let hpa = gpt.translate(vaddr)?; debug!("translate vaddr: {:#x}, hpa: {:#x}", vaddr, hpa); -/* + gpt.map_region( NIMBOS_KERNEL_BASE_VADDR, kernel_entry, meta.physical_memory_size, MappingFlags::READ | MappingFlags::WRITE | MappingFlags::EXECUTE | MappingFlags::USER, )?; -*/ + Ok(gpt) } diff --git a/crates/arm_gic/Cargo.toml b/crates/arm_gic/Cargo.toml index e5028c8ebd..06038a3921 100644 --- a/crates/arm_gic/Cargo.toml +++ b/crates/arm_gic/Cargo.toml @@ -13,3 +13,6 @@ documentation = "https://rcore-os.github.io/arceos/arm_gic/index.html" log = "0.4" tock-registers = "0.8" spin = { version = "0.9.4", features 
= ["use_ticket_mutex"] } + +[features] +hv = [] \ No newline at end of file diff --git a/crates/arm_gic/src/gic_v2.rs b/crates/arm_gic/src/gic_v2.rs index 4c6cea9bc9..cc075f06f6 100644 --- a/crates/arm_gic/src/gic_v2.rs +++ b/crates/arm_gic/src/gic_v2.rs @@ -4,7 +4,9 @@ use core::ptr::NonNull; -use crate::{TriggerMode, GIC_MAX_IRQ, SPI_RANGE, SGI_RANGE, GIC_LIST_REGS_NUM, GIC_CONFIG_BITS}; +use crate::{TriggerMode, GIC_CONFIG_BITS, GIC_LIST_REGS_NUM, GIC_MAX_IRQ, SGI_RANGE, SPI_RANGE, +GIC_PRIVATE_INT_NUM, GIC_SGIS_NUM, +}; use tock_registers::interfaces::{Readable, Writeable}; use tock_registers::register_structs; use tock_registers::registers::{ReadOnly, ReadWrite, WriteOnly}; @@ -80,7 +82,7 @@ register_structs! { } // #[cfg(feature = "hv")] -/*register_structs! { +register_structs! { /// GIC Hypervisor Interface registers #[allow(non_snake_case)] GicHypervisorInterfaceRegs { @@ -88,54 +90,56 @@ register_structs! { (0x0000 => HCR: ReadWrite), /// Virtual Type Register (0x0004 => VTR: ReadOnly), - (0x0008 => _reserved_1), + /// Virtual Machine Control Register + (0x0008 => VMCR: ReadWrite), + (0x000c => _reserved_0), /// Maintenance Interrupt Status Register (0x0010 => MISR: ReadOnly), - (0x0014 => reserve1), + (0x0014 => _reserved_1), /// End Interrupt Status Register (0x0020 => EISR: [ReadOnly; GIC_LIST_REGS_NUM / 32]), - (0x0028 => reserve2), + (0x0028 => _reserved_2), /// Empty List Register Status Register (0x0030 => ELRSR: [ReadOnly; GIC_LIST_REGS_NUM / 32]), - (0x0038 => reserve3), + (0x0038 => _reserved_3), /// Active Priorities Registers (0x00f0 => APR: ReadWrite), - (0x00f4 => reserve4), + (0x00f4 => _reserved_4), /// List Registers (0x0100 => LR: [ReadWrite; GIC_LIST_REGS_NUM]), - (0x0200 => reserve5), + (0x0200 => _reserved_5), (0x1000 => @END), } -}*/ +} + +// for debug usage register_structs! { #[allow(non_snake_case)] - GicHypervisorInterfaceRegs { - /// Hypervisor Control Register. - (0x0000 => HCR: ReadWrite), - /// VGIC Type Register. - (0x0004 => VTR: ReadOnly), - /// Virtual Machine Control Register. - (0x0008 => VMCR: ReadWrite), - (0x000c => _reserved_0), - // Maintenance Interrupt Status Register. - (0x0010 => MISR: ReadOnly), - (0x0014 => _reserved_1), - // End of Interrupt Status Registers 0 and 1. - (0x0020 => EISR0: ReadOnly), - (0x0024 => EISR1: ReadOnly), - (0x0028 => _reserved_2), - // Empty List Register Status Registers 0 and 1. - (0x0030 => ELSR0: ReadOnly), - (0x0034 => ELSR1: ReadOnly), - (0x0038 => _reserved_3), - // Active Priorities Register. - (0x00f0 => APR: ReadWrite), - (0x00f4 => _reserved_4), - // List Registers 0-63. - (0x0100 => LR: [ReadWrite; 0x40]), - (0x0200 => @END), + GicVcpuInterfaceRegs { + /// CPU Interface Control Register. + (0x0000 => CTLR: ReadWrite), + /// Interrupt Priority Mask Register. + (0x0004 => PMR: ReadWrite), + /// Binary Point Register. + (0x0008 => BPR: ReadWrite), + /// Interrupt Acknowledge Register. + (0x000c => IAR: ReadOnly), + /// End of Interrupt Register. + (0x0010 => EOIR: WriteOnly), + /// Running Priority Register. + (0x0014 => RPR: ReadOnly), + /// Highest Priority Pending Interrupt Register. + (0x0018 => HPPIR: ReadOnly), + (0x001c => _reserved_1), + /// CPU Interface Identification Register. + (0x00fc => IIDR: ReadOnly), + (0x0100 => _reserved_2), + /// Deactivate Interrupt Register. + (0x1000 => DIR: WriteOnly), + (0x1004 => @END), } } + /// The GIC distributor. 
/// /// The Distributor block performs interrupt prioritization and distribution @@ -196,6 +200,11 @@ pub struct GicHypervisorInterface { base: NonNull, } +#[derive(Debug, Clone)] +pub struct GicVcpuInterface { + base: NonNull, +} + unsafe impl Send for GicDistributor {} unsafe impl Sync for GicDistributor {} @@ -205,6 +214,9 @@ unsafe impl Sync for GicCpuInterface {} unsafe impl Send for GicHypervisorInterface {} unsafe impl Sync for GicHypervisorInterface {} +unsafe impl Send for GicVcpuInterface {} +unsafe impl Sync for GicVcpuInterface {} + impl GicDistributor { /// Construct a new GIC distributor instance from the base address. pub const fn new(base: *mut u8) -> Self { @@ -260,16 +272,22 @@ impl GicDistributor { self.regs().ICENABLER[reg].set(mask); } } + /// Enables or disables the given interrupt. + pub fn get_enable(&mut self, vector: usize) -> bool { + let reg = vector / 32; + let mask = 1 << (vector % 32); + self.regs().ISENABLER[reg].get() & mask != 0 + } /// Set SGIR for sgi int id and target cpu. - /* + /* pub fn set_sgi(&self, cpu_interface: usize, sgi_num: usize) { debug!("set sgi!!!!"); let int_id = (sgi_num & 0b1111) as u32; let cpu_targetlist = 1 << (16 + cpu_interface); self.regs().SGIR.set(cpu_targetlist | int_id); } - + pub fn send_sgi(&mut self, cpu_if: usize, sgi_num: usize) { debug!("send ipi to cpu {}", cpu_if); @@ -277,7 +295,12 @@ impl GicDistributor { } */ pub fn send_sgi(&mut self, cpu_if: usize, sgi_num: usize) { - debug!("send sgi {} with priority {:#x} to cpu {}", sgi_num, self.get_priority(sgi_num), cpu_if); + debug!( + "send sgi {} with priority {:#x} to cpu {}", + sgi_num, + self.get_priority(sgi_num), + cpu_if + ); // debug!("send sgi 2 with priority {:#x} to cpu {}", self.get_priority(2), cpu_if); let sgir = ((1 << (16 + cpu_if)) | (sgi_num & 0b1111)) as u32; debug!("this is sgir value: {:#x}", sgir); @@ -328,7 +351,8 @@ impl GicDistributor { let reg_idx = int_id / 4; let offset = (int_id % 4) * 8; if is_pend { - self.regs().SPENDSGIR[reg_idx].set(1 << (offset + current_cpu_id)); // get current cpu todo() + self.regs().SPENDSGIR[reg_idx].set(1 << (offset + current_cpu_id)); + // get current cpu todo() } else { self.regs().CPENDSGIR[reg_idx].set(0xff << offset); } @@ -389,7 +413,6 @@ impl GicDistributor { self.regs().ICFGR[reg_ind].set((icfgr & !mask) | (((cfg as u32) << off) & mask)); } - /// Provides information about the configuration of this Redistributor. /// Get typer register. pub fn get_typer(&self) -> u32 { @@ -401,19 +424,24 @@ impl GicDistributor { self.regs().IIDR.get() } - /// Initializes the GIC distributor. + pub fn print_prio(&self) { + for i in 0..256 { + debug!("prio {} is {:#x}", i, self.regs().IPRIORITYR[i].get()); + } + } + /// Initializes the GIC distributor globally. /// /// It disables all interrupts, sets the target of all SPIs to CPU 0, /// configures all SPIs to be edge-triggered, and finally enables the GICD. /// /// This function should be called only once. 
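    /// Per-CPU interrupt state (SGIs, PPIs and their priorities) is handled
    /// separately by [`Self::local_init`], which must be called on every core.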
- pub fn init(&mut self) { + pub fn global_init(&mut self) { let max_irqs = self.max_irqs(); assert!(max_irqs <= GIC_MAX_IRQ); self.max_irqs = max_irqs; // Disable all interrupts - for i in (0..max_irqs).step_by(32) { + for i in (GIC_PRIVATE_INT_NUM..max_irqs).step_by(32) { self.regs().ICENABLER[i / 32].set(u32::MAX); self.regs().ICPENDR[i / 32].set(u32::MAX); self.regs().ICACTIVER[i / 32].set(u32::MAX); @@ -421,10 +449,12 @@ impl GicDistributor { if self.cpu_num() > 1 { for i in (SPI_RANGE.start..max_irqs).step_by(4) { // Set external interrupts to target cpu 0 + #[cfg(feature = "hv")] self.regs().IPRIORITYR[i / 4].set(u32::MAX); self.regs().ITARGETSR[i / 4].set(0x01_01_01_01); } } + #[cfg(not(feature = "hv"))] // Initialize all the SPIs to edge triggered for i in SPI_RANGE.start..max_irqs { self.configure_interrupt(i, TriggerMode::Edge); @@ -432,7 +462,36 @@ impl GicDistributor { // enable GIC0 let prev = self.regs().CTLR.get(); - self.regs().CTLR.set( prev | 1 ); + self.regs().CTLR.set(prev | 1); + } + + /// Initializes the GIC distributor locally. + /// + /// It disables and clear all sgi interrupts + /// configures all interrupts have lowest priority possible by default + /// + /// This function should be called every cpu init. + pub fn local_init(&mut self) { + let max_irqs = self.max_irqs(); + assert!(max_irqs <= GIC_MAX_IRQ); + self.max_irqs = max_irqs; + + // Disable all interrupts + for i in (0..GIC_PRIVATE_INT_NUM).step_by(32) { + self.regs().ICENABLER[i / 32].set(u32::MAX); + self.regs().ICPENDR[i / 32].set(u32::MAX); + self.regs().ICACTIVER[i / 32].set(u32::MAX); + } + // the corresponding GICD_CPENDSGIR register number, n, is given by n = x DIV 4 + // the SGI Clear-pending field offset, y, is given by y = x MOD 4 + for i in (0..GIC_SGIS_NUM).step_by(4) { + self.regs().CPENDSGIR[i / 4].set(u32::MAX); + } + + for i in (0..GIC_PRIVATE_INT_NUM).step_by(4) { + self.regs().IPRIORITYR[i / 4].set(u32::MAX); + } + } } @@ -448,12 +507,12 @@ impl GicCpuInterface { unsafe { self.base.as_ref() } } - // When interrupt priority drop is separated from interrupt deactivation, + // When interrupt priority drop is separated from interrupt deactivation, // a write to this register deactivates the specified interrupt. pub fn set_dir(&self, dir: u32) { self.regs().DIR.set(dir); } - + /// Returns the interrupt ID of the highest priority pending interrupt for /// the CPU interface. (read GICC_IAR) /// @@ -471,11 +530,11 @@ impl GicCpuInterface { pub fn set_eoi(&self, iar: u32) { self.regs().EOIR.set(iar); } - - /// Controls the CPU interface, including enabling of interrupt groups, - /// interrupt signal bypass, binary point registers used, and separation + + /// Controls the CPU interface, including enabling of interrupt groups, + /// interrupt signal bypass, binary point registers used, and separation /// of priority drop and interrupt deactivation. - /// Get or set CTLR. + /// Get or set CTLR. 
pub fn get_ctlr(&self) -> u32 { self.regs().CTLR.get() } @@ -515,10 +574,12 @@ impl GicCpuInterface { // enable GIC0 #[cfg(not(feature = "hv"))] self.regs().CTLR.set(1); - #[cfg(feature = "hv")] - // set EOImodeNS and EN bit for hypervisor - self.regs().CTLR.set(1); - //self.regs().CTLR.set(1| 0x200); + #[cfg(feature = "hv")] { + // EOImodeNS, bit [9] Controls the behavior of Non-secure accesses to GICC_EOIR GICC_AEOIR, and GICC_DIR + // EnableGrp0, bit [0] Enables the signaling of Group 0 interrupts by the CPU interface to a target PE: + self.regs().CTLR.set(1| 1 << 9); + } + // unmask interrupts at all priority levels self.regs().PMR.set(0xff); } @@ -545,15 +606,13 @@ impl GicHypervisorInterface { self.regs().HCR.set(hcr); } - // ELSR1 - pub fn get_elsr1(&self) -> u32 { - self.regs().ELSR1.get() + // Enables the hypervisor to save and restore the virtual machine view of the GIC state. + pub fn get_vmcr(&self) -> u32 { + self.regs().VMCR.get() } - // ELSR0 - pub fn get_elsr0(&self) -> u32 { - self.regs().ELSR0.get() + pub fn set_vmcr(&self, vmcr:u32) { + self.regs().VMCR.set(vmcr); } - // VTR: Indicates the number of implemented virtual priority bits and List registers. // VTR ListRegs, bits [4:0]: The number of implemented List registers, minus one. // Get ListRegs number. @@ -578,9 +637,9 @@ impl GicHypervisorInterface { self.regs().MISR.get() } - // APR: These registers track which preemption levels are active in the virtual CPU interface, - // and indicate the current active priority. Corresponding bits are set to 1 in this register - // when an interrupt is acknowledged, based on GICH_LR.Priority, and the least significant + // APR: These registers track which preemption levels are active in the virtual CPU interface, + // and indicate the current active priority. Corresponding bits are set to 1 in this register + // when an interrupt is acknowledged, based on GICH_LR.Priority, and the least significant // bit set is cleared on EOI. // Get or set APR. pub fn get_apr(&self) -> u32 { @@ -590,13 +649,70 @@ impl GicHypervisorInterface { self.regs().APR.set(apr); } + pub fn get_eisr_by_idx(&self, eisr_idx: usize) -> u32 { + self.regs().EISR[eisr_idx].get() + } + + pub fn get_elrsr_by_idx(&self, elsr_idx: usize) -> u32 { + self.regs().ELRSR[elsr_idx].get() + } + pub fn init(&self) { for i in 0..self.get_lrs_num() { self.set_lr_by_idx(i, 0); } - // LRENPIE, bit [2]: List Register Entry Not Present Interrupt Enable. - // When it set to 1, maintenance interrupt signaled while GICH_HCR.EOICount is not 0. - let hcr_prev = self.get_hcr(); - self.set_hcr(hcr_prev | (1 << 2) as u32); + // [9] VEM Alias of GICV_CTLR.EOImode. + self.set_vmcr(1 | 1 << 9); + // LRENPIE, bit [2]: List Register Entry Not Present Interrupt Enable. When it set to 1, maintenance interrupt signaled while GICH_HCR.EOICount is not 0. + let hcr_prev: u32 = self.get_hcr(); + self.set_hcr(hcr_prev | 1 as u32 | (1 << 2) as u32); // need to set bit 0????? [0] enable maintenance interrupt } } + +impl GicVcpuInterface { + /// Construct a new GIC VCPU interface instance from the base address. + pub const fn new(base: *mut u8) -> Self { + Self { + base: NonNull::new(base).unwrap().cast(), + } + } + + const fn regs(&self) -> &GicVcpuInterfaceRegs { + unsafe { self.base.as_ref() } + } + + // When interrupt priority drop is separated from interrupt deactivation, + // a write to this register deactivates the specified interrupt. 
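+    // (When EOImode is set in the corresponding CTLR, a write to EOIR only
+    // performs the priority drop; the interrupt stays active until it is
+    // deactivated through this DIR write.)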
+ pub fn set_dir(&self, dir: u32) { + self.regs().DIR.set(dir); + } + + /// Returns the interrupt ID of the highest priority pending interrupt for + /// the CPU interface. (read GICC_IAR) + /// + /// The read returns a spurious interrupt ID of `1023` if the distributor + /// or the CPU interface are disabled, or there is no pending interrupt on + /// the CPU interface. + pub fn get_iar(&self) -> u32 { + self.regs().IAR.get() + } + + /// Informs the CPU interface that it has completed the processing of the + /// specified interrupt. (write GICC_EOIR) + /// + /// The value written must be the value returns from [`Self::iar`]. + pub fn set_eoi(&self, iar: u32) { + self.regs().EOIR.set(iar); + } + + /// Controls the CPU interface, including enabling of interrupt groups, + /// interrupt signal bypass, binary point registers used, and separation + /// of priority drop and interrupt deactivation. + /// Get or set CTLR. + pub fn get_ctlr(&self) -> u32 { + self.regs().CTLR.get() + } + pub fn set_ctlr(&self, ctlr: u32) { + self.regs().CTLR.set(ctlr); + } +} \ No newline at end of file diff --git a/crates/hypercraft b/crates/hypercraft index 2614e58ace..e3434c8898 160000 --- a/crates/hypercraft +++ b/crates/hypercraft @@ -1 +1 @@ -Subproject commit 2614e58ace46b7551aec285fbc8d16c82caeead9 +Subproject commit e3434c889804f28a53e48468babde073a0e0d3b1 diff --git a/modules/axhal/Cargo.toml b/modules/axhal/Cargo.toml index 4920946556..a7c34b7b54 100644 --- a/modules/axhal/Cargo.toml +++ b/modules/axhal/Cargo.toml @@ -25,7 +25,7 @@ platform-raspi4-aarch64 = [ "dep:page_table_entry", "dep:ratio", ] default = [] -hv = ["paging", "platform-qemu-virt-aarch64", "axconfig/hv", "percpu/hv"] +hv = ["paging", "platform-qemu-virt-aarch64", "axconfig/hv", "percpu/hv", "arm_gic/hv"] [dependencies] log = "0.4" @@ -63,7 +63,6 @@ tock-registers = "0.8" arm_gic = { path = "../../crates/arm_gic" } arm_pl011 = { path = "../../crates/arm_pl011" } cortex-a = "8.1.1" -smccc = "0.1.1" spin = "0.9" [build-dependencies] diff --git a/modules/axhal/src/arch/aarch64/hv/exception.rs b/modules/axhal/src/arch/aarch64/hv/exception.rs index 4c82c7b027..498e6fef69 100644 --- a/modules/axhal/src/arch/aarch64/hv/exception.rs +++ b/modules/axhal/src/arch/aarch64/hv/exception.rs @@ -9,16 +9,21 @@ // See the Mulan PSL v2 for more details. 
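// Synchronous exceptions taken to EL2 are dispatched on ESR_EL2.EC (see
// exception_class() in exception_utils): 0x16 = HVC from the guest,
// 0x17 = trapped SMC, 0x24 = data abort from a lower EL; any other class hits
// the panic arm in lower_aarch64_synchronous(). IRQs routed to EL2 are read
// from the GIC CPU interface and forwarded to handle_irq_extern_hv().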
use core::arch::global_asm; -use tock_registers::interfaces::*; - use hypercraft::arch::ContextFrame; use hypercraft::arch::ContextFrameTrait; +use tock_registers::interfaces::*; +use super::exception_utils::*; use crate::platform::aarch64_common::gic::*; -use super::sync::{data_abort_handler, hvc_handler, smc_handler}; global_asm!(include_str!("exception.S")); +extern "C" { + fn data_abort_handler(ctx: &mut ContextFrame); + fn hvc_handler(ctx: &mut ContextFrame); + fn smc_handler(ctx: &mut ContextFrame); +} + #[repr(u8)] #[derive(Debug)] #[allow(dead_code)] @@ -39,148 +44,6 @@ enum TrapSource { LowerAArch32 = 3, } -#[inline(always)] -pub fn exception_esr() -> usize { - cortex_a::registers::ESR_EL2.get() as usize -} - -#[inline(always)] -pub fn exception_esr_el1() -> usize { - cortex_a::registers::ESR_EL1.get() as usize -} - -#[inline(always)] -fn exception_class() -> usize { - (exception_esr() >> 26) & 0b111111 -} - -#[inline(always)] -fn exception_far() -> usize { - cortex_a::registers::FAR_EL2.get() as usize -} - -#[inline(always)] -fn exception_hpfar() -> usize { - let hpfar: u64; - mrs!(hpfar, HPFAR_EL2); - hpfar as usize -} - -#[allow(non_upper_case_globals)] -const ESR_ELx_S1PTW_SHIFT: usize = 7; -#[allow(non_upper_case_globals)] -const ESR_ELx_S1PTW: usize = 1 << ESR_ELx_S1PTW_SHIFT; - -macro_rules! arm_at { - ($at_op:expr, $addr:expr) => { - unsafe { - core::arch::asm!(concat!("AT ", $at_op, ", {0}"), in(reg) $addr, options(nomem, nostack)); - core::arch::asm!("isb"); - } - }; -} - -fn translate_far_to_hpfar(far: usize) -> Result { - /* - * We have - * PAR[PA_Shift - 1 : 12] = PA[PA_Shift - 1 : 12] - * HPFAR[PA_Shift - 9 : 4] = FIPA[PA_Shift - 1 : 12] - */ - // #define PAR_TO_HPFAR(par) (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8) - fn par_to_far(par: u64) -> u64 { - let mask = ((1 << (52 - 12)) - 1) << 12; - (par & mask) >> 8 - } - - use cortex_a::registers::PAR_EL1; - - let par = PAR_EL1.get(); - arm_at!("s1e1r", far); - let tmp = PAR_EL1.get(); - PAR_EL1.set(par); - if (tmp & PAR_EL1::F::TranslationAborted.value) != 0 { - Err(()) - } else { - Ok(par_to_far(tmp) as usize) - } -} - -// addr be ipa -#[inline(always)] -pub fn exception_fault_addr() -> usize { - let far = exception_far(); - let hpfar = if (exception_esr() & ESR_ELx_S1PTW) == 0 && exception_data_abort_is_permission_fault() { - translate_far_to_hpfar(far).unwrap_or_else(|_| { - info!("error happen in translate_far_to_hpfar"); - 0 - }) - } else { - exception_hpfar() - }; - (far & 0xfff) | (hpfar << 8) -} - -/// \return 1 means 32-bit instruction, 0 means 16-bit instruction -#[inline(always)] -fn exception_instruction_length() -> usize { - (exception_esr() >> 25) & 1 -} - -#[inline(always)] -pub fn exception_next_instruction_step() -> usize { - 2 + 2 * exception_instruction_length() -} - -#[inline(always)] -pub fn exception_iss() -> usize { - exception_esr() & ((1 << 25) - 1) -} - -#[inline(always)] -pub fn exception_data_abort_handleable() -> bool { - (!(exception_iss() & (1 << 10)) | (exception_iss() & (1 << 24))) != 0 -} - -#[inline(always)] -pub fn exception_data_abort_is_translate_fault() -> bool { - (exception_iss() & 0b111111 & (0xf << 2)) == 4 -} - -#[inline(always)] -pub fn exception_data_abort_is_permission_fault() -> bool { - (exception_iss() & 0b111111 & (0xf << 2)) == 12 -} - -#[inline(always)] -pub fn exception_data_abort_access_width() -> usize { - 1 << ((exception_iss() >> 22) & 0b11) -} - -#[inline(always)] -pub fn exception_data_abort_access_is_write() -> bool { - (exception_iss() & (1 << 6)) 
!= 0 -} - -#[inline(always)] -pub fn exception_data_abort_access_in_stage2() -> bool { - (exception_iss() & (1 << 7)) != 0 -} - -#[inline(always)] -pub fn exception_data_abort_access_reg() -> usize { - (exception_iss() >> 16) & 0b11111 -} - -#[inline(always)] -pub fn exception_data_abort_access_reg_width() -> usize { - 4 + 4 * ((exception_iss() >> 15) & 1) -} - -#[inline(always)] -pub fn exception_data_abort_access_is_sign_ext() -> bool { - ((exception_iss() >> 21) & 1) != 0 -} - /// deal with invalid aarch64 synchronous exception #[no_mangle] fn invalid_exception_el2(tf: &mut ContextFrame, kind: TrapKind, source: TrapSource) { @@ -193,45 +56,44 @@ fn invalid_exception_el2(tf: &mut ContextFrame, kind: TrapKind, source: TrapSour /// deal with lower aarch64 interruption exception #[no_mangle] fn current_spxel_irq(ctx: &mut ContextFrame) { - debug!("[current_spxel_irq] "); + debug!("IRQ stay in the same el!!!!!!!!!!!!!!!"); lower_aarch64_irq(ctx); } /// deal with lower aarch64 interruption exception #[no_mangle] fn lower_aarch64_irq(ctx: &mut ContextFrame) { - debug!("IRQ routed to EL2"); + debug!("IRQ routed to EL2!!!!!!!!!!!!!!!"); + // read_timer_regs(); let (irq, src) = gicc_get_current_irq(); debug!("src {} id{}", src, irq); - crate::trap::handle_irq_extern_hv(irq, src); - // deactivate_irq(irq); - /* - if let Some(irq_id) = pending_irq() { - // deactivate_irq(irq_id); - inject_irq(irq_id); - } - */ + crate::trap::handle_irq_extern_hv(irq, src, ctx); } /// deal with lower aarch64 synchronous exception #[no_mangle] fn lower_aarch64_synchronous(ctx: &mut ContextFrame) { - debug!("enter lower_aarch64_synchronous exception class:0x{:X}", exception_class()); + debug!( + "enter lower_aarch64_synchronous exception class:0x{:X}", + exception_class() + ); // current_cpu().set_context_addr(ctx); match exception_class() { 0x24 => { // info!("Core[{}] data_abort_handler", cpu_id()); - data_abort_handler(ctx); + unsafe { + data_abort_handler(ctx); + } } - 0x16 => { + 0x16 => unsafe { hvc_handler(ctx); - } - 0x17 => { + }, + 0x17 => unsafe { smc_handler(ctx); - } + }, // 0x18 todo? 
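            // (EC 0x18 would be a trapped MSR/MRS/system-register access; it is
            // not handled yet and falls through to the panic arm below.)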
- _ => { + _ => { panic!( "handler not presents for EC_{} @ipa 0x{:x}, @pc 0x{:x}, @esr 0x{:x}, @sctlr_el1 0x{:x}, @vttbr_el2 0x{:x}, @vtcr_el2: {:#x} hcr: {:#x} ctx:{}", exception_class(), @@ -244,6 +106,6 @@ fn lower_aarch64_synchronous(ctx: &mut ContextFrame) { cortex_a::registers::HCR_EL2.get() as usize, ctx ); - }, + } } } diff --git a/modules/axhal/src/arch/aarch64/hv/exception_utils.rs b/modules/axhal/src/arch/aarch64/hv/exception_utils.rs new file mode 100644 index 0000000000..0bf943559c --- /dev/null +++ b/modules/axhal/src/arch/aarch64/hv/exception_utils.rs @@ -0,0 +1,162 @@ +use tock_registers::interfaces::*; + +#[inline(always)] +pub fn exception_esr() -> usize { + cortex_a::registers::ESR_EL2.get() as usize +} + +#[inline(always)] +pub fn exception_esr_el1() -> usize { + cortex_a::registers::ESR_EL1.get() as usize +} + +#[inline(always)] +pub fn exception_class() -> usize { + (exception_esr() >> 26) & 0b111111 +} + +#[inline(always)] +fn exception_far() -> usize { + cortex_a::registers::FAR_EL2.get() as usize +} + +#[inline(always)] +fn exception_hpfar() -> usize { + let hpfar: u64; + mrs!(hpfar, HPFAR_EL2); + hpfar as usize +} + +pub fn read_timer_regs() { + let cntvoff_el2: u64; + let cntv_cval_el0: u64; + let cntvct_el0: u64; + let cntpct_el0: u64; + let cntv_ctl_el0: u64; + mrs!(cntvoff_el2, CNTVOFF_EL2); + mrs!(cntv_cval_el0, CNTV_CVAL_EL0); + mrs!(cntvct_el0, CNTVCT_EL0); + mrs!(cntpct_el0, CNTPCT_EL0); + mrs!(cntv_ctl_el0, CNTV_CTL_EL0); + debug!("!!!!!!!!!!!!!!!!!!!!!!!this is cntv_cval_el0:{:#x}", cntv_cval_el0); + debug!("!!!!!!!!!!!!!!!!!!!!!!!this is cntv_ctl_el0:{:#x}", cntv_ctl_el0); + debug!("!!!!!!!!!!!!!!!!!!!!!this is cntvct_el0:{:#x}", cntvct_el0); + debug!("!!!!!!!!!!!!!!!!!this is cntvoff_el2:{:#x}", cntvoff_el2); + debug!("!!!!!!!!!!!!!!!!!!!!this is cntpct_el0:{:#x}", cntpct_el0); + +} +#[allow(non_upper_case_globals)] +const ESR_ELx_S1PTW_SHIFT: usize = 7; +#[allow(non_upper_case_globals)] +const ESR_ELx_S1PTW: usize = 1 << ESR_ELx_S1PTW_SHIFT; + +macro_rules! 
arm_at { + ($at_op:expr, $addr:expr) => { + unsafe { + core::arch::asm!(concat!("AT ", $at_op, ", {0}"), in(reg) $addr, options(nomem, nostack)); + core::arch::asm!("isb"); + } + }; +} + +fn translate_far_to_hpfar(far: usize) -> Result { + /* + * We have + * PAR[PA_Shift - 1 : 12] = PA[PA_Shift - 1 : 12] + * HPFAR[PA_Shift - 9 : 4] = FIPA[PA_Shift - 1 : 12] + */ + // #define PAR_TO_HPFAR(par) (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8) + fn par_to_far(par: u64) -> u64 { + let mask = ((1 << (52 - 12)) - 1) << 12; + (par & mask) >> 8 + } + + use cortex_a::registers::PAR_EL1; + + let par = PAR_EL1.get(); + arm_at!("s1e1r", far); + let tmp = PAR_EL1.get(); + PAR_EL1.set(par); + if (tmp & PAR_EL1::F::TranslationAborted.value) != 0 { + Err(()) + } else { + Ok(par_to_far(tmp) as usize) + } +} + +// addr be ipa +#[inline(always)] +pub fn exception_fault_addr() -> usize { + let far = exception_far(); + let hpfar = + if (exception_esr() & ESR_ELx_S1PTW) == 0 && exception_data_abort_is_permission_fault() { + translate_far_to_hpfar(far).unwrap_or_else(|_| { + info!("error happen in translate_far_to_hpfar"); + 0 + }) + } else { + exception_hpfar() + }; + (far & 0xfff) | (hpfar << 8) +} + +/// return 1 means 32-bit instruction, 0 means 16-bit instruction +#[inline(always)] +fn exception_instruction_length() -> usize { + (exception_esr() >> 25) & 1 +} + +#[inline(always)] +pub fn exception_next_instruction_step() -> usize { + 2 + 2 * exception_instruction_length() +} + +#[inline(always)] +pub fn exception_iss() -> usize { + exception_esr() & ((1 << 25) - 1) +} + +#[inline(always)] +pub fn exception_data_abort_handleable() -> bool { + (!(exception_iss() & (1 << 10)) | (exception_iss() & (1 << 24))) != 0 +} + +#[inline(always)] +pub fn exception_data_abort_is_translate_fault() -> bool { + (exception_iss() & 0b111111 & (0xf << 2)) == 4 +} + +#[inline(always)] +pub fn exception_data_abort_is_permission_fault() -> bool { + (exception_iss() & 0b111111 & (0xf << 2)) == 12 +} + +#[inline(always)] +pub fn exception_data_abort_access_width() -> usize { + 1 << ((exception_iss() >> 22) & 0b11) +} + +#[inline(always)] +pub fn exception_data_abort_access_is_write() -> bool { + (exception_iss() & (1 << 6)) != 0 +} + +#[inline(always)] +pub fn exception_data_abort_access_in_stage2() -> bool { + (exception_iss() & (1 << 7)) != 0 +} + +#[inline(always)] +pub fn exception_data_abort_access_reg() -> usize { + (exception_iss() >> 16) & 0b11111 +} + +#[inline(always)] +pub fn exception_data_abort_access_reg_width() -> usize { + 4 + 4 * ((exception_iss() >> 15) & 1) +} + +#[inline(always)] +pub fn exception_data_abort_access_is_sign_ext() -> bool { + ((exception_iss() >> 21) & 1) != 0 +} diff --git a/modules/axhal/src/arch/aarch64/hv/guest_psci.rs b/modules/axhal/src/arch/aarch64/hv/guest_psci.rs deleted file mode 100644 index 6b6af10c7f..0000000000 --- a/modules/axhal/src/arch/aarch64/hv/guest_psci.rs +++ /dev/null @@ -1,100 +0,0 @@ -use smccc::psci::*; - -use super::ipi::*; - -const PSCI_RET_SUCCESS: usize = 0; -const PSCI_RET_NOT_SUPPORTED: usize = 0xffff_ffff_ffff_ffff; //-1 -const PSCI_RET_INVALID_PARAMS: usize = 0xffff_ffff_ffff_fffe; // -2 -const PSCI_RET_ALREADY_ON: usize = 0xffff_ffff_ffff_fffc; // -4 - -const PSCI_TOS_NOT_PRESENT_MP: usize = 2; - -#[inline(never)] -pub fn smc_guest_handler( - fid: usize, - x1: usize, - x2: usize, - x3: usize, -) -> Result { - debug!( - "smc_guest_handler: fid {:#x}, x1 {:#x}, x2 {:#x}, x3 {:#x}", - fid, x1, x2, x3 - ); - let r = match fid as u32 { - PSCI_FEATURES => match 
x1 as u32 { - PSCI_VERSION | PSCI_CPU_ON_64 | PSCI_FEATURES => Ok(PSCI_RET_SUCCESS), - // | PSCI_CPU_SUSPEND_64| PSCI_SYSTEM_SUSPEND_64 - // | PSCI_SYSTEM_RESET2_64 => Ok(PSCI_RET_SUCCESS), - _ => Ok(PSCI_RET_NOT_SUPPORTED), - }, - PSCI_VERSION => Ok(smc_call(PSCI_VERSION, 0, 0, 0).0), - PSCI_CPU_ON_64 => Ok(psci_guest_cpu_on(x1, x2, x3)), - /* - PSCI_CPU_ON_64 => { - // unsafe { - // run_vm_vcpu(0, 1); - // } - - let smc_ret = smc_call(PSCI_CPU_ON_64, x1, x2, x3).0; - if smc_ret == 0 { - Ok(0) - }else { - // todo(); - Ok(0) - } - } - */ - // PSCI_SYSTEM_RESET => psci_guest_sys_reset(), - // PSCI_SYSTEM_RESET => Ok(smc_call(PSCI_SYSTEM_RESET, 0, 0, 0).0), - // PSCI_SYSTEM_OFF => psci_guest_sys_off(), - PSCI_SYSTEM_OFF => Ok(smc_call(PSCI_SYSTEM_OFF, 0, 0, 0).0), - PSCI_MIGRATE_INFO_TYPE => Ok(PSCI_TOS_NOT_PRESENT_MP), - PSCI_AFFINITY_INFO_64 => Ok(0), - _ => Err(()), - }; - debug!( - "smc_guest_handler: fid {:#x}, x1 {:#x}, x2 {:#x}, x3 {:#x} result: {:#x}", - fid, x1, x2, x3, r.unwrap(), - ); - r -} - -fn psci_guest_cpu_on(mpidr: usize, entry: usize, ctx: usize) -> usize { - debug!("this is vcpu id {}, entry:{:#x} ctx:{:#x}", mpidr, entry, ctx); - let pcpu_id = mpidr & 0xff; // vcpu and pcpu id are the same - let m = IpiPowerMessage { - src: 0, //vm id - event: PowerEvent::PsciIpiCpuOn, - entry, - context: ctx, - }; - - if !ipi_send_msg(pcpu_id, IpiType::Power, IpiInnerMsg::Power(m)) { - warn!("psci_guest_cpu_on: fail to send msg"); - return usize::MAX - 1; - } - - 0 -} -#[inline(never)] -pub fn smc_call(x0: u32, x1: usize, x2: usize, x3: usize) -> (usize, usize, usize, usize) { - #[cfg(target_arch = "aarch64")] - unsafe { - let r0; - let r1; - let r2; - let r3; - core::arch::asm!( - "smc #0", - inout("x0") x0 as usize => r0, - inout("x1") x1 => r1, - inout("x2") x2 => r2, - inout("x3") x3 => r3, - options(nomem, nostack) - ); - (r0, r1, r2, r3) - } - - #[cfg(not(target_arch = "aarch64"))] - error!("smc not supported"); -} diff --git a/modules/axhal/src/arch/aarch64/hv/mod.rs b/modules/axhal/src/arch/aarch64/hv/mod.rs index a5a2d6a1a7..38b4b1f220 100644 --- a/modules/axhal/src/arch/aarch64/hv/mod.rs +++ b/modules/axhal/src/arch/aarch64/hv/mod.rs @@ -1,4 +1,5 @@ mod exception; -mod sync; -mod guest_psci; -pub mod ipi; +pub mod exception_utils; + +// mod sync; +// mod guest_psci; diff --git a/modules/axhal/src/arch/aarch64/mod.rs b/modules/axhal/src/arch/aarch64/mod.rs index 98bcdbf99b..3d735b9d53 100644 --- a/modules/axhal/src/arch/aarch64/mod.rs +++ b/modules/axhal/src/arch/aarch64/mod.rs @@ -35,6 +35,7 @@ pub fn irqs_enabled() -> bool { /// It must be called with interrupts enabled, otherwise it will never return. 
#[inline] pub fn wait_for_irqs() { + debug!("wait for irqqqqqqqqqqq"); aarch64_cpu::asm::wfi(); } diff --git a/modules/axhal/src/lib.rs b/modules/axhal/src/lib.rs index 2fbc75d672..dd3749123c 100644 --- a/modules/axhal/src/lib.rs +++ b/modules/axhal/src/lib.rs @@ -43,9 +43,12 @@ extern crate hypercraft; mod platform; #[cfg(all(target_arch = "aarch64", feature = "hv"))] -pub use platform::aarch64_common::gic::IPI_IRQ_NUM; -#[cfg(all(target_arch = "aarch64", feature = "hv"))] -pub use platform::aarch64_common::gic::{gicc_get_current_irq, deactivate_irq}; +pub use platform::aarch64_common::gic::{ + gicc_get_current_irq, deactivate_irq, interrupt_cpu_ipi_send, + gic_is_priv, gic_lrs, gicc_clear_current_irq, gicv_clear_current_irq, + GICH, GICD, GICV, GICC, GICD_BASE, GIC_SPI_MAX, + IPI_IRQ_NUM, MAINTENANCE_IRQ_NUM, +}; pub mod arch; pub mod cpu; diff --git a/modules/axhal/src/platform/aarch64_common/boot.rs b/modules/axhal/src/platform/aarch64_common/boot.rs index d4933ab1ef..2793c2c229 100644 --- a/modules/axhal/src/platform/aarch64_common/boot.rs +++ b/modules/axhal/src/platform/aarch64_common/boot.rs @@ -15,6 +15,7 @@ static mut BOOT_PT_L0: [A64PTE; 512] = [A64PTE::empty(); 512]; #[link_section = ".data.boot_page_table"] static mut BOOT_PT_L1: [A64PTE; 512] = [A64PTE::empty(); 512]; +#[cfg(not(all(target_arch = "aarch64", feature = "hv")))] unsafe fn switch_to_el1() { SPSel.write(SPSel::SP::ELx); SP_EL0.set(0); @@ -58,9 +59,48 @@ unsafe fn switch_to_el1() { } } +#[cfg(not(all(target_arch = "aarch64", feature = "hv")))] +unsafe fn init_mmu() { + // Device-nGnRE memory + let attr0 = MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck; + // Normal memory + let attr1 = MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc + + MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc; + MAIR_EL1.write(attr0 + attr1); // 0xff_04 + + // Enable TTBR0 and TTBR1 walks, page size = 4K, vaddr size = 48 bits, paddr size = 40 bits. 
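+    // (T0SZ/T1SZ = 16 gives a 2^(64-16) = 48-bit virtual address range for the
+    // TTBR0 and TTBR1 halves respectively.)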
+ let tcr_flags0 = TCR_EL1::EPD0::EnableTTBR0Walks + + TCR_EL1::TG0::KiB_4 + + TCR_EL1::SH0::Inner + + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::T0SZ.val(16); + let tcr_flags1 = TCR_EL1::EPD1::EnableTTBR1Walks + + TCR_EL1::TG1::KiB_4 + + TCR_EL1::SH1::Inner + + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable + + TCR_EL1::T1SZ.val(16); + TCR_EL1.write(TCR_EL1::IPS::Bits_48 + tcr_flags0 + tcr_flags1); + barrier::isb(barrier::SY); + + // Set both TTBR0 and TTBR1 + let root_paddr = PhysAddr::from(BOOT_PT_L0.as_ptr() as usize).as_usize() as _; + TTBR0_EL1.set(root_paddr); + TTBR1_EL1.set(root_paddr); + + // Flush the entire TLB + crate::arch::flush_tlb(None); + + // Enable the MMU and turn on I-cache and D-cache + SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable); + barrier::isb(barrier::SY); +} + unsafe fn switch_to_el2() { SPSel.write(SPSel::SP::ELx); let current_el = CurrentEL.read(CurrentEL::EL); + if current_el == 3 { SCR_EL3.write( SCR_EL3::NS::NonSecure + SCR_EL3::HCE::HvcEnabled + SCR_EL3::RW::NextELIsAarch64, @@ -131,43 +171,6 @@ unsafe fn init_mmu_el2() { barrier::isb(barrier::SY); } -unsafe fn init_mmu() { - // Device-nGnRE memory - let attr0 = MAIR_EL1::Attr0_Device::nonGathering_nonReordering_EarlyWriteAck; - // Normal memory - let attr1 = MAIR_EL1::Attr1_Normal_Inner::WriteBack_NonTransient_ReadWriteAlloc - + MAIR_EL1::Attr1_Normal_Outer::WriteBack_NonTransient_ReadWriteAlloc; - MAIR_EL1.write(attr0 + attr1); // 0xff_04 - - // Enable TTBR0 and TTBR1 walks, page size = 4K, vaddr size = 48 bits, paddr size = 40 bits. - let tcr_flags0 = TCR_EL1::EPD0::EnableTTBR0Walks - + TCR_EL1::TG0::KiB_4 - + TCR_EL1::SH0::Inner - + TCR_EL1::ORGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::IRGN0::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::T0SZ.val(16); - let tcr_flags1 = TCR_EL1::EPD1::EnableTTBR1Walks - + TCR_EL1::TG1::KiB_4 - + TCR_EL1::SH1::Inner - + TCR_EL1::ORGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::IRGN1::WriteBack_ReadAlloc_WriteAlloc_Cacheable - + TCR_EL1::T1SZ.val(16); - TCR_EL1.write(TCR_EL1::IPS::Bits_48 + tcr_flags0 + tcr_flags1); - barrier::isb(barrier::SY); - - // Set both TTBR0 and TTBR1 - let root_paddr = PhysAddr::from(BOOT_PT_L0.as_ptr() as usize).as_usize() as _; - TTBR0_EL1.set(root_paddr); - TTBR1_EL1.set(root_paddr); - - // Flush the entire TLB - crate::arch::flush_tlb(None); - - // Enable the MMU and turn on I-cache and D-cache - SCTLR_EL1.modify(SCTLR_EL1::M::Enable + SCTLR_EL1::C::Cacheable + SCTLR_EL1::I::Cacheable); - barrier::isb(barrier::SY); -} - unsafe fn enable_fp() { if cfg!(feature = "fp_simd") { CPACR_EL1.write(CPACR_EL1::FPEN::TrapNothing); diff --git a/modules/axhal/src/platform/aarch64_common/generic_timer.rs b/modules/axhal/src/platform/aarch64_common/generic_timer.rs index ef238a8702..f144d9cc1d 100644 --- a/modules/axhal/src/platform/aarch64_common/generic_timer.rs +++ b/modules/axhal/src/platform/aarch64_common/generic_timer.rs @@ -28,7 +28,7 @@ pub fn nanos_to_ticks(nanos: u64) -> u64 { /// Set a one-shot timer. /// /// A timer interrupt will be triggered at the given deadline (in nanoseconds). 
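/// Under the `hv` feature the interval is programmed into the EL2 physical
/// timer (`CNTHP_TVAL_EL2`); otherwise the EL1 physical timer (`CNTP`) is used.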
-#[cfg(feature = "irq")] +#[cfg(all(feature = "irq", not(feature = "hv")))] pub fn set_oneshot_timer(deadline_ns: u64) { let cnptct = CNTPCT_EL0.get(); let cnptct_deadline = nanos_to_ticks(deadline_ns); @@ -41,6 +41,19 @@ pub fn set_oneshot_timer(deadline_ns: u64) { } } +#[cfg(all(feature = "irq", feature = "hv"))] +pub fn set_oneshot_timer(deadline_ns: u64) { + let cnptct = CNTPCT_EL0.get(); + let cnptct_deadline = nanos_to_ticks(deadline_ns); + if cnptct < cnptct_deadline { + let interval = cnptct_deadline - cnptct; + debug_assert!(interval <= u32::MAX as u64); + msr!(CNTHP_TVAL_EL2, interval as u64); + } else { + msr!(CNTHP_TVAL_EL2, 0); + } +} + /// Early stage initialization: stores the timer frequency. pub(crate) fn init_early() { let freq = CNTFRQ_EL0.get(); @@ -51,13 +64,19 @@ pub(crate) fn init_early() { } pub(crate) fn init_percpu() { - #[cfg(feature = "irq")] + #[cfg(all(feature = "irq", not(feature = "hv")))] { CNTP_CTL_EL0.write(CNTP_CTL_EL0::ENABLE::SET); CNTP_TVAL_EL0.set(0); - #[cfg(not(feature = "hv"))] crate::platform::irq::set_enable(crate::platform::irq::TIMER_IRQ_NUM, true); - #[cfg(feature = "hv")] - crate::platform::irq::set_enable(crate::platform::irq::TIMER_IRQ_NUM, false); + } + #[cfg(feature = "hv")] + { + let ctl = 1; + let tval = 0; + msr!(CNTHP_CTL_EL2, ctl); + msr!(CNTHP_TVAL_EL2, tval); + // crate::platform::irq::set_enable(crate::platform::irq::HYPERVISOR_TIMER_IRQ_NUM, true); + } } diff --git a/modules/axhal/src/platform/aarch64_common/gic.rs b/modules/axhal/src/platform/aarch64_common/gic.rs index 0ddef49d2f..e09b0a621e 100644 --- a/modules/axhal/src/platform/aarch64_common/gic.rs +++ b/modules/axhal/src/platform/aarch64_common/gic.rs @@ -1,13 +1,19 @@ use crate::{irq::IrqHandler, mem::phys_to_virt}; -use arm_gic::gic_v2::{GicCpuInterface, GicDistributor, GicHypervisorInterface}; +use arm_gic::gic_v2::{ + GicCpuInterface, GicDistributor, GicHypervisorInterface, GicVcpuInterface +}; +use arm_gic::{GIC_SGIS_NUM, GIC_PRIVATE_INT_NUM}; use memory_addr::PhysAddr; use spinlock::SpinNoIrq; - -#[cfg(feature = "hv")] -use hypercraft::arch::utils::bit_extract; +use spin::Mutex; /// The maximum number of IRQs. pub const MAX_IRQ_COUNT: usize = 1024; +#[cfg(feature = "hv")] +pub const GIC_SPI_MAX: usize = MAX_IRQ_COUNT - GIC_PRIVATE_INT_NUM; + +#[cfg(feature = "hv")] +use hypercraft::arch::utils::bit_extract; /// The timer IRQ number. pub const TIMER_IRQ_NUM: usize = 30; // physical timer, type=PPI, id=14 @@ -24,21 +30,12 @@ pub const IPI_IRQ_NUM: usize = 1; /// The maintenance interrupt irq number. pub const MAINTENANCE_IRQ_NUM: usize = 25; -pub const GIC_SGIS_NUM: usize = 16; - -const GICD_BASE: PhysAddr = PhysAddr::from(axconfig::GICD_PADDR); +pub const GICD_BASE: PhysAddr = PhysAddr::from(axconfig::GICD_PADDR); const GICC_BASE: PhysAddr = PhysAddr::from(axconfig::GICC_PADDR); #[cfg(feature = "hv")] const GICH_BASE: PhysAddr = PhysAddr::from(axconfig::GICH_PADDR); - -#[cfg(feature = "hv")] -const LR_VIRTIRQ_MASK: usize = 0x3ff; -#[cfg(feature = "hv")] -const LR_PENDING_BIT: u32 = 1 << 28; #[cfg(feature = "hv")] -const LR_PHYSIRQ_MASK: usize = 0x3ff << 10; -#[cfg(feature = "hv")] -const LR_HW_BIT: u32 = 1 << 31; +const GICV_BASE: PhysAddr = PhysAddr::from(0x8040000); pub static GICD: SpinNoIrq = SpinNoIrq::new(GicDistributor::new(phys_to_virt(GICD_BASE).as_mut_ptr())); @@ -49,11 +46,19 @@ pub static GICC: GicCpuInterface = GicCpuInterface::new(phys_to_virt(GICC_BASE). 
#[cfg(feature = "hv")] pub static GICH: GicHypervisorInterface = GicHypervisorInterface::new(phys_to_virt(GICH_BASE).as_mut_ptr()); +#[cfg(feature = "hv")] +pub static GICV: GicVcpuInterface = GicVcpuInterface::new(phys_to_virt(GICV_BASE).as_mut_ptr()); + +#[cfg(feature = "hv")] +pub static GIC_LRS_NUM: Mutex = Mutex::new(0); + /// Enables or disables the given IRQ. pub fn set_enable(irq_num: usize, enabled: bool) { debug!("in platform gic set_enable: irq_num {}, enabled {}", irq_num, enabled); - // #[cfg(not(feature = "hv"))] GICD.lock().set_enable(irq_num as _, enabled); + + #[cfg(feature = "hv")] + GICD.lock().set_priority(irq_num as _, 0x7f); /* #[cfg(feature = "hv")] { @@ -92,19 +97,32 @@ pub fn dispatch_irq(irq_num: usize) { /// Initializes GICD, GICC on the primary CPU. pub(crate) fn init_primary() { info!("Initialize GICv2..."); - GICD.lock().init(); - GICC.init(); - #[cfg(feature = "hv")] - { - // GICH.init(); - } + // GICD.lock().init(); + // GICC.init(); + gic_global_init(); + gic_local_init(); } /// Initializes GICC on secondary CPUs. #[cfg(feature = "smp")] pub(crate) fn init_secondary() { info!("Initialize init_secondary GICv2..."); + // GICC.init(); + gic_local_init(); +} + +fn gic_global_init() { + set_gic_lrs(GICH.get_lrs_num()); + GICD.lock().global_init(); +} + +fn gic_local_init() { + GICD.lock().local_init(); GICC.init(); + #[cfg(feature = "hv")] + GICH.init(); + + let ctlr = GICC.get_ctlr(); } #[cfg(feature = "hv")] @@ -144,43 +162,50 @@ pub fn deactivate_irq(iar: usize) { GICC.set_eoi(iar as _); } +pub fn gic_is_priv(int_id: usize) -> bool { + int_id < GIC_PRIVATE_INT_NUM +} + +pub fn gic_lrs() -> usize { + *GIC_LRS_NUM.lock() +} + +pub fn set_gic_lrs(lrs: usize) { + let mut gic_lrs = GIC_LRS_NUM.lock(); + *gic_lrs = lrs; +} + #[cfg(feature = "hv")] -pub fn inject_irq(irq_id: usize) { - let elsr: u64 = (GICH.get_elsr1() as u64) << 32 | GICH.get_elsr0() as u64; - let lr_num = GICH.get_lrs_num(); - let mut lr_idx = -1 as isize; - for i in 0..lr_num { - if (1 << i) & elsr > 0 { - if lr_idx == -1 { - lr_idx = i as isize; - } - continue; - } - // overlap - let _lr_val = GICH.get_lr_by_idx(i) as usize; - if (i & LR_VIRTIRQ_MASK) == irq_id { - return; - } - } - debug!("To Inject IRQ {:#x}, find lr {}", irq_id, lr_idx); - if lr_idx == -1 { +pub fn gicc_clear_current_irq(irq:usize, for_hypervisor: bool) { + debug!("gicc_clear_current_irq: irq {}, for_hypervisor {}", irq, for_hypervisor); + if irq == 0 { return; - } else { - let mut val = 0; - - val = irq_id as u32; - val |= LR_PENDING_BIT; - - if false - /* sgi */ - { - todo!() - } else { - val |= ((irq_id << 10) & LR_PHYSIRQ_MASK) as u32; - val |= LR_HW_BIT; - } - - debug!("To write lr {:#x} val {:#x}", lr_idx, val); - GICH.set_lr_by_idx(lr_idx as usize, val); + } + GICC.set_eoi(irq as _); + if for_hypervisor { + // let addr = 0x08010000 + 0x1000; + // unsafe { + // let gicc_dir = addr as *mut u32; + // *gicc_dir = irq; + // } + GICC.set_dir(irq as _); } } + +#[cfg(feature = "hv")] +pub fn gicv_clear_current_irq(irq:usize, for_hypervisor: bool) { + debug!("gicv_clear_current_irq: irq {}, for_hypervisor {}", irq, for_hypervisor); + if irq == 0 { + return; + } + GICV.set_eoi(irq as _); + + if for_hypervisor { + // let addr = 0x08010000 + 0x1000; + // unsafe { + // let gicc_dir = addr as *mut u32; + // *gicc_dir = irq; + // } + GICV.set_dir(irq as _); + } +} \ No newline at end of file diff --git a/modules/axhal/src/time.rs b/modules/axhal/src/time.rs index f5c7fa8676..a945387cb3 100644 --- a/modules/axhal/src/time.rs +++ 
b/modules/axhal/src/time.rs @@ -14,6 +14,9 @@ pub use crate::platform::irq::TIMER_IRQ_NUM; pub use crate::platform::time::set_oneshot_timer; pub use crate::platform::time::{current_ticks, nanos_to_ticks, ticks_to_nanos}; +#[cfg(all(feature = "irq", feature = "hv", target_arch = "aarch64"))] +pub use crate::platform::irq::HYPERVISOR_TIMER_IRQ_NUM; + /// Number of milliseconds in a second. pub const MILLIS_PER_SEC: u64 = 1_000; /// Number of microseconds in a second. diff --git a/modules/axhal/src/trap.rs b/modules/axhal/src/trap.rs index ac77c47285..8e8822d9e3 100644 --- a/modules/axhal/src/trap.rs +++ b/modules/axhal/src/trap.rs @@ -2,6 +2,9 @@ use crate_interface::{call_interface, def_interface}; +#[cfg(all(feature = "hv", target_arch = "aarch64"))] +use hypercraft::arch::ContextFrame; + /// Trap handler interface. /// /// This trait is defined with the [`#[def_interface]`][1] attribute. Users @@ -15,12 +18,13 @@ pub trait TrapHandler { fn handle_irq(irq_num: usize); #[cfg(all(feature = "hv", target_arch = "aarch64"))] /// Handles interrupt requests for the given IRQ number for route to el2. - fn handle_irq_hv(irq_num: usize, src: usize); + fn handle_irq_hv(irq_num: usize, src: usize, ctx: &mut ContextFrame); // more e.g.: handle_page_fault(); } /// Call the external IRQ handler. #[allow(dead_code)] +// #[cfg(not(all(feature = "hv", target_arch = "aarch64")))] pub(crate) fn handle_irq_extern(irq_num: usize) { call_interface!(TrapHandler::handle_irq, irq_num); } @@ -28,7 +32,7 @@ pub(crate) fn handle_irq_extern(irq_num: usize) { /// Call the external IRQ handler. #[allow(dead_code)] #[cfg(all(feature = "hv", target_arch = "aarch64"))] -pub fn handle_irq_extern_hv(irq_num: usize, src: usize) { +pub fn handle_irq_extern_hv(irq_num: usize, src: usize, ctx: &mut ContextFrame) { debug!("in handle_irq_extern_hv: irq_num {}, src {}", irq_num, src); - call_interface!(TrapHandler::handle_irq_hv, irq_num, src); + call_interface!(TrapHandler::handle_irq_hv, irq_num, src, ctx); } \ No newline at end of file diff --git a/modules/axruntime/Cargo.toml b/modules/axruntime/Cargo.toml index 7c51cac6b2..5eaf298168 100644 --- a/modules/axruntime/Cargo.toml +++ b/modules/axruntime/Cargo.toml @@ -22,7 +22,7 @@ display = ["alloc", "paging", "axdriver/virtio-gpu", "dep:axdisplay"] default = ["axtask?/default"] -hv = ["alloc", "dep:hypercraft", "axhal/hv", "dep:page_table", "dep:page_table_entry", "dep:lazy_init"] +hv = ["alloc", "dep:hypercraft", "axhal/hv", "dep:page_table", "dep:page_table_entry", "dep:lazy_init", "dep:arm_gic"] [dependencies] spin = "0.9" @@ -45,6 +45,7 @@ axtask = { path = "../axtask", default-features = false, optional = true } hypercraft = { path = "../../crates/hypercraft", optional = true } page_table = { path = "../../crates/page_table", optional = true } page_table_entry = { path = "../../crates/page_table_entry", features = ["hv"], optional = true } +arm_gic = { path = "../../crates/arm_gic", optional = true} [target.'cfg(target_arch = "x86_64")'.dependencies] x86 = "0.52" @@ -55,4 +56,5 @@ raw-cpuid = "11.0" [target.'cfg(target_arch = "aarch64")'.dependencies] cortex-a = "8.1.1" tock-registers = "0.8.1" -aarch64-cpu = "9.3" \ No newline at end of file +aarch64-cpu = "9.3" +smccc = "0.1.1" \ No newline at end of file diff --git a/modules/axruntime/src/hv/aarch64_kernel/emu.rs b/modules/axruntime/src/hv/aarch64_kernel/emu.rs new file mode 100644 index 0000000000..c8f9d80f0e --- /dev/null +++ b/modules/axruntime/src/hv/aarch64_kernel/emu.rs @@ -0,0 +1,85 @@ +use spin::Mutex; +extern 
crate alloc; +use alloc::vec::Vec; + +use hypercraft::arch::emu::*; +use hypercraft::arch::utils::in_range; + +use super::current_cpu; + +pub const EMU_DEV_NUM_MAX: usize = 32; +pub static EMU_DEVS_LIST: Mutex<Vec<EmuDevEntry>> = Mutex::new(Vec::new()); + +// TO CHECK +pub fn emu_handler(emu_ctx: &EmuContext) -> bool { + let ipa = emu_ctx.address; + let emu_devs_list = EMU_DEVS_LIST.lock(); + + for emu_dev in &*emu_devs_list { + let active_vcpu = current_cpu().get_active_vcpu().unwrap(); + if active_vcpu.vm_id == emu_dev.vm_id && in_range(ipa, emu_dev.ipa, emu_dev.size - 1) { + let handler = emu_dev.handler; + let id = emu_dev.id; + drop(emu_devs_list); + return handler(id, emu_ctx); + } + } + debug!( + "emu_handler: no emul handler for Core {} data abort ipa 0x{:x}", + current_cpu().cpu_id, + ipa + ); + return false; +} + +pub fn emu_register_dev( + emu_type: EmuDeviceType, + vm_id: usize, + dev_id: usize, + address: usize, + size: usize, + handler: EmuDevHandler, +) { + let mut emu_devs_list = EMU_DEVS_LIST.lock(); + if emu_devs_list.len() >= EMU_DEV_NUM_MAX { + panic!("emu_register_dev: can't register more devs"); + } + + for emu_dev in &*emu_devs_list { + if vm_id != emu_dev.vm_id { + continue; + } + if in_range(address, emu_dev.ipa, emu_dev.size - 1) + || in_range(emu_dev.ipa, address, size - 1) + { + panic!("emu_register_dev: duplicated emul address region: prev address 0x{:x} size 0x{:x}, next address 0x{:x} size 0x{:x}", emu_dev.ipa, emu_dev.size, address, size); + } + } + + emu_devs_list.push(EmuDevEntry { + emu_type, + vm_id, + id: dev_id, + ipa: address, + size, + handler, + }); +} + +pub fn emu_remove_dev(vm_id: usize, dev_id: usize, address: usize, size: usize) { + let mut emu_devs_list = EMU_DEVS_LIST.lock(); + for (idx, emu_dev) in emu_devs_list.iter().enumerate() { + if vm_id == emu_dev.vm_id + && emu_dev.ipa == address + && emu_dev.id == dev_id + && emu_dev.size == size + { + emu_devs_list.remove(idx); + return; + } + } + panic!( + "emu_remove_dev: emu dev does not exist: address 0x{:x} size 0x{:x}", + address, size + ); +} diff --git a/modules/axruntime/src/hv/aarch64_kernel/emuintc_handler.rs b/modules/axruntime/src/hv/aarch64_kernel/emuintc_handler.rs new file mode 100644 index 0000000000..c3d55e70af --- /dev/null +++ b/modules/axruntime/src/hv/aarch64_kernel/emuintc_handler.rs @@ -0,0 +1,202 @@ +extern crate alloc; + +use arm_gic::{ + GICD_TYPER_CPUNUM_MSK, GICD_TYPER_CPUNUM_OFF, GIC_PRIVATE_INT_NUM, GIC_SGIS_NUM +}; +use axhal::GIC_SPI_MAX; +use alloc::sync::Arc; +use hypercraft::VM; +use hypercraft::arch::emu::{EmuContext, EmuDevs}; +use hypercraft::arch::vgic::{Vgic, VgicInt, VgicCpuPriv}; +use super::{active_vm, current_cpu}; +use axhal::{GICH, GICD}; +use crate::{HyperCraftHalImpl, GuestPageTable}; +use super::vgic::*; + +const VGICD_REG_OFFSET_PREFIX_CTLR: usize = 0x0; +// same as TYPER & IIDR +const VGICD_REG_OFFSET_PREFIX_ISENABLER: usize = 0x2; +const VGICD_REG_OFFSET_PREFIX_ICENABLER: usize = 0x3; +const VGICD_REG_OFFSET_PREFIX_ISPENDR: usize = 0x4; +const VGICD_REG_OFFSET_PREFIX_ICPENDR: usize = 0x5; +const VGICD_REG_OFFSET_PREFIX_ISACTIVER: usize = 0x6; +const VGICD_REG_OFFSET_PREFIX_ICACTIVER: usize = 0x7; +const VGICD_REG_OFFSET_PREFIX_ICFGR: usize = 0x18; +const VGICD_REG_OFFSET_PREFIX_SGIR: usize = 0x1e; + +pub fn emu_intc_handler(_emu_dev_id: usize, emu_ctx: &EmuContext) -> bool { + // take bits [11:0] of the address, since the GICD register space ends at offset 0x1000 + let offset = emu_ctx.address & 0xfff; + // only accesses up to a word (4 bytes) are emulated here + if emu_ctx.width > 4 { + return
false; + } + + let vm = active_vm(); + let vgic = vm.vgic(); + // extract the 7th to 11th bit in offset in order to get the prefix of different registers + let vgicd_offset_prefix = (offset & 0xf80) >> 7; + + if !vgicd_emu_access_is_vaild(emu_ctx) { + return false; + } + + match vgicd_offset_prefix { + VGICD_REG_OFFSET_PREFIX_ISENABLER => { + emu_isenabler_access(&*vgic, emu_ctx); + } + VGICD_REG_OFFSET_PREFIX_ISPENDR => { + emu_ispendr_access(&*vgic, emu_ctx); + } + VGICD_REG_OFFSET_PREFIX_ISACTIVER => { + emu_isactiver_access(&*vgic, emu_ctx); + } + VGICD_REG_OFFSET_PREFIX_ICENABLER => { + emu_icenabler_access(&*vgic, emu_ctx); + } + VGICD_REG_OFFSET_PREFIX_ICPENDR => { + emu_icpendr_access(&*vgic, emu_ctx); + } + VGICD_REG_OFFSET_PREFIX_ICACTIVER => { + emu_icactiver_access(&*vgic, emu_ctx); + } + VGICD_REG_OFFSET_PREFIX_ICFGR => { + emu_icfgr_access(&*vgic, emu_ctx); + } + VGICD_REG_OFFSET_PREFIX_SGIR => { + emu_sgiregs_access(&*vgic, emu_ctx); + } + _ => { + match offset { + // VGICD_REG_OFFSET(CTLR) + 0 => { + emu_ctrl_access(&*vgic, emu_ctx); + } + // VGICD_REG_OFFSET(TYPER) + 0x004 => { + emu_typer_access(&*vgic, emu_ctx); + } + // VGICD_REG_OFFSET(IIDR) + 0x008 => { + emu_iidr_access(&*vgic, emu_ctx); + } + _ => { + if !emu_ctx.write { + let idx = emu_ctx.reg; + let val = 0; + current_cpu().set_gpr(idx, val); + } + } + } + if offset >= 0x400 && offset < 0x800 { + emu_ipriorityr_access(&*vgic, emu_ctx); + } else if offset >= 0x800 && offset < 0xc00 { + emu_itargetr_access(&*vgic, emu_ctx); + } + } + } + true +} + +pub fn vgicd_emu_access_is_vaild(emu_ctx: &EmuContext) -> bool { + let offset = emu_ctx.address & 0xfff; + let offset_prefix = (offset & 0xf80) >> 7; + match offset_prefix { + VGICD_REG_OFFSET_PREFIX_CTLR + | VGICD_REG_OFFSET_PREFIX_ISENABLER + | VGICD_REG_OFFSET_PREFIX_ISPENDR + | VGICD_REG_OFFSET_PREFIX_ISACTIVER + | VGICD_REG_OFFSET_PREFIX_ICENABLER + | VGICD_REG_OFFSET_PREFIX_ICPENDR + | VGICD_REG_OFFSET_PREFIX_ICACTIVER + | VGICD_REG_OFFSET_PREFIX_ICFGR => { + if emu_ctx.width != 4 || emu_ctx.address & 0x3 != 0 { + return false; + } + } + VGICD_REG_OFFSET_PREFIX_SGIR => { + if (emu_ctx.width == 4 && emu_ctx.address & 0x3 != 0) + || (emu_ctx.width == 2 && emu_ctx.address & 0x1 != 0) + { + return false; + } + } + _ => { + // TODO: hard code to rebuild (gicd IPRIORITYR and ITARGETSR) + if offset >= 0x400 && offset < 0xc00 { + if (emu_ctx.width == 4 && emu_ctx.address & 0x3 != 0) + || (emu_ctx.width == 2 && emu_ctx.address & 0x1 != 0) + { + return false; + } + } + } + } + true +} + +pub fn emu_intc_init(vm: &mut VM, emu_dev_id: usize) { + // let vgic_cpu_num = vm.config().cpu_num(); + // vm.init_intc_mode(true); + + let vgic_cpu_num = 1; + let vgic = Arc::new(Vgic::::default()); + + let mut vgicd = vgic.vgicd.lock(); + vgicd.typer = (GICD.lock().get_typer() & GICD_TYPER_CPUNUM_MSK as u32) + | (((vm.vcpu_num() - 1) << GICD_TYPER_CPUNUM_OFF) & GICD_TYPER_CPUNUM_MSK) as u32; + vgicd.iidr = GICD.lock().get_iidr(); + + for i in 0..GIC_SPI_MAX { + vgicd.interrupts.push(VgicInt::::new(i)); + } + drop(vgicd); + + for i in 0..vgic_cpu_num { + let mut cpu_priv = VgicCpuPriv::default(); + for int_idx in 0..GIC_PRIVATE_INT_NUM { + let vcpu = vm.vcpu(i).unwrap(); + let phys_id = vcpu.pcpu_id; + + cpu_priv.interrupts.push(VgicInt::::priv_new( + int_idx, + vcpu.clone(), + 1 << phys_id, + int_idx < GIC_SGIS_NUM, + )); + } + + let mut vgic_cpu_priv = vgic.cpu_priv.lock(); + vgic_cpu_priv.push(cpu_priv); + } + + vm.set_emu_devs(emu_dev_id, EmuDevs::::Vgic(vgic.clone())); +} + +pub fn 
gic_maintenance_handler() { + let misr = GICH.get_misr(); + let vm = active_vm(); + + let vgic = vm.vgic(); + // 0b1 End Of Interrupt maintenance interrupt asserted. + if misr & 1 != 0 { + handle_trapped_eoir(&*vgic, current_cpu().get_active_vcpu().unwrap().clone()); + } + + // 0b1 List Register Entry Not Present maintenance interrupt asserted. + if misr & (1 << 2) != 0 { + let mut hcr = GICH.get_hcr(); + // deal with eoi + while hcr & (0b11111 << 27) != 0 { + eoir_highest_spilled_active(&*vgic, current_cpu().get_active_vcpu().unwrap().clone()); + hcr -= 1 << 27; + GICH.set_hcr(hcr); + hcr = GICH.get_hcr(); + } + } + + // 0b1 No Pending maintenance interrupt asserted. + if misr & (1 << 3) != 0 { + refill_lrs(&*vgic, current_cpu().get_active_vcpu().unwrap().clone()); + } +} diff --git a/modules/axruntime/src/hv/aarch64_kernel/guest_psci.rs b/modules/axruntime/src/hv/aarch64_kernel/guest_psci.rs index 01d6701afd..d66501d255 100644 --- a/modules/axruntime/src/hv/aarch64_kernel/guest_psci.rs +++ b/modules/axruntime/src/hv/aarch64_kernel/guest_psci.rs @@ -1,15 +1,121 @@ +use smccc::psci::*; + use hypercraft::VCpu; -use axhal::arch::hv::ipi::*; +// use axhal::arch::hv::ipi::*; + +use super::ipi::{ + ipi_send_msg, + IpiType, IpiInnerMsg, IpiMessage, PowerEvent, IpiPowerMessage +}; use super::current_cpu; use super::vm_array::{init_vm_vcpu, run_vm_vcpu}; use crate::hv::HyperCraftHalImpl; +const PSCI_RET_SUCCESS: usize = 0; +const PSCI_RET_NOT_SUPPORTED: usize = 0xffff_ffff_ffff_ffff; //-1 +const PSCI_RET_INVALID_PARAMS: usize = 0xffff_ffff_ffff_fffe; // -2 +const PSCI_RET_ALREADY_ON: usize = 0xffff_ffff_ffff_fffc; // -4 + +const PSCI_TOS_NOT_PRESENT_MP: usize = 2; + +#[inline(never)] +pub fn smc_guest_handler( + fid: usize, + x1: usize, + x2: usize, + x3: usize, +) -> Result { + debug!( + "smc_guest_handler: fid {:#x}, x1 {:#x}, x2 {:#x}, x3 {:#x}", + fid, x1, x2, x3 + ); + let r = match fid as u32 { + PSCI_FEATURES => match x1 as u32 { + PSCI_VERSION | PSCI_CPU_ON_64 | PSCI_FEATURES => Ok(PSCI_RET_SUCCESS), + // | PSCI_CPU_SUSPEND_64| PSCI_SYSTEM_SUSPEND_64 + // | PSCI_SYSTEM_RESET2_64 => Ok(PSCI_RET_SUCCESS), + _ => Ok(PSCI_RET_NOT_SUPPORTED), + }, + PSCI_VERSION => Ok(smc_call(PSCI_VERSION, 0, 0, 0).0), + PSCI_CPU_ON_64 => Ok(psci_guest_cpu_on(x1, x2, x3)), + /* + PSCI_CPU_ON_64 => { + // unsafe { + // run_vm_vcpu(0, 1); + // } + + let smc_ret = smc_call(PSCI_CPU_ON_64, x1, x2, x3).0; + if smc_ret == 0 { + Ok(0) + }else { + // todo(); + Ok(0) + } + } + */ + // PSCI_SYSTEM_RESET => psci_guest_sys_reset(), + // PSCI_SYSTEM_RESET => Ok(smc_call(PSCI_SYSTEM_RESET, 0, 0, 0).0), + // PSCI_SYSTEM_OFF => psci_guest_sys_off(), + PSCI_SYSTEM_OFF => Ok(smc_call(PSCI_SYSTEM_OFF, 0, 0, 0).0), + PSCI_MIGRATE_INFO_TYPE => Ok(PSCI_TOS_NOT_PRESENT_MP), + PSCI_AFFINITY_INFO_64 => Ok(0), + _ => Err(()), + }; + debug!( + "smc_guest_handler: fid {:#x}, x1 {:#x}, x2 {:#x}, x3 {:#x} result: {:#x}", + fid, x1, x2, x3, r.unwrap(), + ); + r +} + +fn psci_guest_cpu_on(mpidr: usize, entry: usize, ctx: usize) -> usize { + debug!("this is vcpu id {}, entry:{:#x} ctx:{:#x}", mpidr, entry, ctx); + let pcpu_id = mpidr & 0xff; // vcpu and pcpu id are the same + let m = IpiPowerMessage { + src: 0, //vm id + event: PowerEvent::PsciIpiCpuOn, + entry, + context: ctx, + }; + + if !ipi_send_msg(pcpu_id, IpiType::Power, IpiInnerMsg::Power(m)) { + warn!("psci_guest_cpu_on: fail to send msg"); + return usize::MAX - 1; + } + + 0 +} + +#[inline(never)] +pub fn smc_call(x0: u32, x1: usize, x2: usize, x3: usize) -> (usize, usize, usize, 
usize) { + #[cfg(target_arch = "aarch64")] + unsafe { + let r0; + let r1; + let r2; + let r3; + core::arch::asm!( + "smc #0", + inout("x0") x0 as usize => r0, + inout("x1") x1 => r1, + inout("x2") x2 => r2, + inout("x3") x3 => r3, + options(nomem, nostack) + ); + (r0, r1, r2, r3) + } + + #[cfg(not(target_arch = "aarch64"))] + error!("smc not supported"); +} + + pub(crate) fn psci_ipi_handler(msg: &IpiMessage) { debug!("enter psci_ipi_handler"); match &msg.ipi_message { IpiInnerMsg::Power(power_msg) => { // only one vcpu for a pcpu and only one vm. need to modify in the future - let trgt_vcpu = current_cpu().get_active_vcpu(); + let trgt_vcpu = current_cpu().get_active_vcpu_mut().unwrap(); match power_msg.event { PowerEvent::PsciIpiCpuOn => { /* diff --git a/modules/axruntime/src/hv/aarch64_kernel/interrupt.rs b/modules/axruntime/src/hv/aarch64_kernel/interrupt.rs index c0a87203c7..dfbaf263ab 100644 --- a/modules/axruntime/src/hv/aarch64_kernel/interrupt.rs +++ b/modules/axruntime/src/hv/aarch64_kernel/interrupt.rs @@ -1,29 +1,42 @@ -use axhal::arch::hv::ipi::*; -use super::current_cpu; +use super::ipi::*; +use super::{current_cpu, active_vm}; +use hypercraft::{VM, VCpu}; +use super::vgic::vgic_inject; +use crate::{HyperCraftHalImpl, GuestPageTable}; pub fn handle_virtual_interrupt(irq_num: usize, src: usize) { - debug!("src {:#x} id{:#x} virtual interrupt not implement yet", src, irq_num); - /* - if int_id >= 16 && int_id < 32 { + debug!("src {:#x} id{:#x} virtual interrupt", src, irq_num); + + let vm = active_vm(); + // only one vm and one vcpu, every interrupt is match to this vcpu + //if irq_num >= 16 && irq_num < 32 { + let vcpu = current_cpu().get_active_vcpu().unwrap().clone(); + if vm.has_interrupt(irq_num) { + interrupt_vm_inject(vm, vcpu, irq_num); + } + /* if let Some(vcpu) = ¤t_cpu().active_vcpu { if let Some(active_vm) = vcpu.vm() { - if active_vm.has_interrupt(int_id) { - interrupt_vm_inject(active_vm, vcpu.clone(), int_id, src); + if active_vm.has_interrupt(irq_num) { + interrupt_vm_inject(active_vm, vcpu.clone(), irq_num, src); } } } - } + */ + //} + // todo: there is only one vcpu bind to a pcpu now + /* for vcpu in current_cpu().vcpu_array.iter() { if let Some(vcpu) = vcpu { match vcpu.vm() { Some(vm) => { - if vm.has_interrupt(int_id) { + if vm.has_interrupt(irq_num) { if vcpu.state() as usize == VcpuState::VcpuInv as usize { return true; } - interrupt_vm_inject(vm, vcpu.clone(), int_id, src); + interrupt_vm_inject(vm, vcpu.clone(), irq_num, src); return false; } } @@ -31,10 +44,36 @@ pub fn handle_virtual_interrupt(irq_num: usize, src: usize) { } } } - */ + */ + debug!( - "interrupt_handler: core {} receive unsupported int {}", + "interrupt_handler: core {} receive virtual int {}", current_cpu().cpu_id, irq_num ); -} \ No newline at end of file +} + +pub fn interrupt_vm_inject(vm: &mut VM, vcpu: VCpu, irq_num: usize) { + debug!("[interrupt_vm_inject] this is interrupt vm inject"); + let vgic = vm.vgic(); + // restore_vcpu_gic(current_cpu().active_vcpu.clone(), vcpu.clone()); + if let Some(cur_vcpu) = current_cpu().get_active_vcpu().clone() { + if cur_vcpu.vm_id == vcpu.vm_id { + debug!("[interrupt_vm_inject] before vgic_inject"); + vgic_inject(&*vgic, vcpu, irq_num); + debug!("[interrupt_vm_inject] after vm {} inject irq {}", vm.vm_id, irq_num); + return; + } + } + + // vcpu.push_int(irq_num); + // save_vcpu_gic(current_cpu().active_vcpu.clone(), vcpu.clone()); +} + +pub fn interrupt_vm_register(vm:& mut VM, id: usize) -> bool { + debug!("interrupt_vm_register id: 
{:#x}", id); + super::vgic::vgic_set_hw_int(vm, id); + vm.set_int_bit_map(id); + true +} + diff --git a/modules/axhal/src/arch/aarch64/hv/ipi.rs b/modules/axruntime/src/hv/aarch64_kernel/ipi.rs similarity index 73% rename from modules/axhal/src/arch/aarch64/hv/ipi.rs rename to modules/axruntime/src/hv/aarch64_kernel/ipi.rs index e4643a8e72..a42e438b82 100644 --- a/modules/axhal/src/arch/aarch64/hv/ipi.rs +++ b/modules/axruntime/src/hv/aarch64_kernel/ipi.rs @@ -1,8 +1,11 @@ use spin::Mutex; +use super::current_cpu; +use super::guest_psci::psci_ipi_handler; + extern crate alloc; use alloc::vec::Vec; -use crate::platform::aarch64_common::gic::*; +use axhal::{interrupt_cpu_ipi_send, IPI_IRQ_NUM}; pub static IPI_HANDLER_LIST: Mutex> = Mutex::new(Vec::new()); @@ -95,9 +98,7 @@ pub fn ipi_register(ipi_type: IpiType, handler: IpiHandlerFunc) -> bool { true } -#[no_mangle] pub fn ipi_send_msg(target_id: usize, ipi_type: IpiType, ipi_message: IpiInnerMsg) -> bool { - // push msg to cpu int list /* let ipi_handler_list = IPI_HANDLER_LIST.lock(); debug!("[ipi_send_msg] !!!!!!!!!!!!!!!!!!!!!!!!!!!Address of ipi_handler_list: {:p}", &*ipi_handler_list as *const _); @@ -105,6 +106,7 @@ pub fn ipi_send_msg(target_id: usize, ipi_type: IpiType, ipi_message: IpiInnerMs debug!("[ipi_send_msg] 111111111111 ipi_send_msg handler: {:#?}", ipi_handler_list[0].handler as *const()); drop(ipi_handler_list); */ + // push msg to cpu int list let msg = IpiMessage { ipi_type, ipi_message }; let mut cpu_int_list = CPU_INT_LIST.lock(); cpu_int_list[target_id].msg_queue.push(msg); @@ -113,8 +115,40 @@ pub fn ipi_send_msg(target_id: usize, ipi_type: IpiType, ipi_message: IpiInnerMs ipi_send(target_id) } -#[no_mangle] fn ipi_send(target_id: usize) -> bool { interrupt_cpu_ipi_send(target_id, IPI_IRQ_NUM); true } + +pub fn init_ipi() { + if !ipi_register(IpiType::Power, psci_ipi_handler) { + panic!("power_arch_init: failed to register ipi IpiTPower"); + } +} + +pub fn ipi_irq_handler() { + debug!("ipi handler"); + let cpu_id = current_cpu().cpu_id; + let mut cpu_if_list = CPU_INT_LIST.lock(); + let mut msg: Option = cpu_if_list[cpu_id].pop(); + drop(cpu_if_list); + + while !msg.is_none() { + let ipi_msg = msg.unwrap(); + let ipi_type = ipi_msg.ipi_type as usize; + + let ipi_handler_list = IPI_HANDLER_LIST.lock(); + let len = ipi_handler_list.len(); + let handler = ipi_handler_list[ipi_type].handler.clone(); + drop(ipi_handler_list); + + if len <= ipi_type { + debug!("illegal ipi type {}", ipi_type) + } else { + debug!("!!!!!!!!! 
this is handler: {:#?}", handler as *const()); + handler(&ipi_msg); + } + let mut cpu_int_list = CPU_INT_LIST.lock(); + msg = cpu_int_list[cpu_id].pop(); + } +} diff --git a/modules/axruntime/src/hv/aarch64_kernel/ipi_handler.rs b/modules/axruntime/src/hv/aarch64_kernel/ipi_handler.rs deleted file mode 100644 index fb7c90eab4..0000000000 --- a/modules/axruntime/src/hv/aarch64_kernel/ipi_handler.rs +++ /dev/null @@ -1,36 +0,0 @@ -use axhal::arch::hv::ipi::*; -use super::current_cpu; -use super::guest_psci::psci_ipi_handler; - -pub fn init_ipi() { - if !ipi_register(IpiType::Power, psci_ipi_handler) { - panic!("power_arch_init: failed to register ipi IpiTPower"); - } -} - -pub fn ipi_irq_handler() { - debug!("ipi handler"); - let cpu_id = current_cpu().cpu_id; - let mut cpu_if_list = CPU_INT_LIST.lock(); - let mut msg: Option = cpu_if_list[cpu_id].pop(); - drop(cpu_if_list); - - while !msg.is_none() { - let ipi_msg = msg.unwrap(); - let ipi_type = ipi_msg.ipi_type as usize; - - let ipi_handler_list = IPI_HANDLER_LIST.lock(); - let len = ipi_handler_list.len(); - let handler = ipi_handler_list[ipi_type].handler.clone(); - drop(ipi_handler_list); - - if len <= ipi_type { - debug!("illegal ipi type {}", ipi_type) - } else { - debug!("!!!!!!!!! this is handler: {:#?}", handler as *const()); - handler(&ipi_msg); - } - let mut cpu_int_list = CPU_INT_LIST.lock(); - msg = cpu_int_list[cpu_id].pop(); - } -} diff --git a/modules/axruntime/src/hv/aarch64_kernel/mod.rs b/modules/axruntime/src/hv/aarch64_kernel/mod.rs index 65367caaae..d5b2d3c371 100644 --- a/modules/axruntime/src/hv/aarch64_kernel/mod.rs +++ b/modules/axruntime/src/hv/aarch64_kernel/mod.rs @@ -1,20 +1,28 @@ -pub mod vm_array; -mod ipi_handler; -mod interrupt; +mod emu; +mod emuintc_handler; mod guest_psci; +mod interrupt; +mod ipi; +mod sync; +mod vgic; +pub mod vm_array; pub use vm_array::{ - VM_ARRAY, VM_MAX_NUM, - is_vcpu_init_ok, is_vcpu_primary_ok, init_vm_vcpu, add_vm, add_vm_vcpu, print_vm, run_vm_vcpu + VM_ARRAY, VM_MAX_NUM, + add_vm, add_vm_vcpu, get_vm, print_vm, + init_vm_vcpu, init_vm_emu_device, init_vm_passthrough_device, + is_vcpu_init_ok, is_vcpu_primary_ok, + run_vm_vcpu, }; -use hypercraft::PerCpu; -use crate::hv::HyperCraftHalImpl; +use crate::{HyperCraftHalImpl, GuestPageTable}; +use hypercraft::{PerCpu, VM}; +pub use emuintc_handler::gic_maintenance_handler; pub use interrupt::handle_virtual_interrupt; -pub use ipi_handler::{ipi_irq_handler, init_ipi}; +pub use ipi::{init_ipi, ipi_irq_handler, cpu_int_list_init}; -use axhal::{gicc_get_current_irq, deactivate_irq}; +use axhal::{deactivate_irq, gicc_get_current_irq}; /// get current cpu pub fn current_cpu() -> &'static mut PerCpu { @@ -22,12 +30,27 @@ pub fn current_cpu() -> &'static mut PerCpu { PerCpu::::ptr_for_cpu(cpu_id) } +/// get active vm +pub fn active_vm() -> &'static mut VM { + let cpu_id = axhal::cpu::this_cpu_id(); + let percpu = PerCpu::::ptr_for_cpu(cpu_id); + let vm_id = percpu.get_active_vcpu().unwrap().vm_id; + let vm_id = 0; // Replace this with your actual logic to get the active VM ID + match get_vm(vm_id) { + Some(vm) => vm, + None => panic!("No active VM found"), + } +} + pub fn secondary_main_hv(cpu_id: usize) { info!("Hello World from cpu {}", cpu_id); let (irq, src) = gicc_get_current_irq(); deactivate_irq(irq); - debug!("after wfi in secondary CPU {} irq id {} src {}", cpu_id, irq, src); + debug!( + "after wfi in secondary CPU {} irq id {} src {}", + cpu_id, irq, src + ); while !is_vcpu_primary_ok() { core::hint::spin_loop(); @@ -41,7 +64,8 
@@ pub fn secondary_main_hv(cpu_id: usize) { core::hint::spin_loop(); } info!("vcpu {} init ok", cpu_id); - + debug!("is irq enabled: {}", axhal::arch::irqs_enabled()); - axhal::trap::handle_irq_extern_hv(irq, cpu_id); + let ctx = current_cpu().get_ctx().unwrap(); + axhal::trap::handle_irq_extern_hv(irq, cpu_id, ctx); } diff --git a/modules/axhal/src/arch/aarch64/hv/sync.rs b/modules/axruntime/src/hv/aarch64_kernel/sync.rs similarity index 60% rename from modules/axhal/src/arch/aarch64/hv/sync.rs rename to modules/axruntime/src/hv/aarch64_kernel/sync.rs index 5b12b97a72..2fabdc88ce 100644 --- a/modules/axhal/src/arch/aarch64/hv/sync.rs +++ b/modules/axruntime/src/hv/aarch64_kernel/sync.rs @@ -8,19 +8,32 @@ // MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. // See the Mulan PSL v2 for more details. -use hypercraft::arch::ContextFrame; -use hypercraft::arch::ContextFrameTrait; +use hypercraft::arch::{ContextFrame, ContextFrameTrait}; use hypercraft::arch::vcpu::VmCpuRegisters; use hypercraft::arch::hvc::{HVC_SYS, HVC_SYS_BOOT, hvc_guest_handler}; +use hypercraft::arch::emu::EmuContext; + +use axhal::arch::hv::exception_utils::*; +use axhal::{gic_is_priv, gic_lrs, GICD, GICH, GICV, GICC}; use super::guest_psci::smc_guest_handler; -use super::exception::*; +use super::current_cpu; +use super::emu::emu_handler; +use super::interrupt::handle_virtual_interrupt; const HVC_RETURN_REG: usize = 0; const SMC_RETURN_REG: usize = 0; -pub fn data_abort_handler(ctx: &mut ContextFrame) { - /* +#[no_mangle] +pub extern "C" fn data_abort_handler(ctx: &mut ContextFrame) { + current_cpu().set_ctx(ctx); + + let prio25 = GICD.lock().get_priority(25); + let prio27 = GICD.lock().get_priority(27); + let prio1 = GICD.lock().get_priority(1); + // let state = GICD.lock().get_enable(25); + debug!("[data_abort_handler] prio25:{:#x}, prio27:{:#x} prio1:{:#x}", prio25, prio27, prio1); + let emu_ctx = EmuContext { address: exception_fault_addr(), width: exception_data_abort_access_width(), @@ -29,14 +42,16 @@ pub fn data_abort_handler(ctx: &mut ContextFrame) { reg: exception_data_abort_access_reg(), reg_width: exception_data_abort_access_reg_width(), }; - */ - debug!("data fault addr 0x{:x}, esr: 0x{:x}", - exception_fault_addr(), exception_esr()); + // if ctx.exception_pc() == 0xffffa23d3a94fc6c { + // read_timer_regs(); + // } + debug!("data fault addr 0x{:#x}, esr: 0x{:#x}, elr:{:#x}", + exception_fault_addr(), exception_esr(), ctx.exception_pc()); let elr = ctx.exception_pc(); if !exception_data_abort_handleable() { panic!( - "Data abort not handleable 0x{:x}, esr 0x{:x}", + "Data abort not handleable 0x{:#x}, esr 0x{:#x}", exception_fault_addr(), exception_esr() ); @@ -49,9 +64,9 @@ pub fn data_abort_handler(ctx: &mut ContextFrame) { exception_fault_addr(), ctx ); } - /* + if !emu_handler(&emu_ctx) { - active_vm().unwrap().show_pagetable(emu_ctx.address); + // active_vm().unwrap().show_pagetable(emu_ctx.address); info!( "write {}, width {}, reg width {}, addr {:x}, iss {:x}, reg idx {}, reg val 0x{:x}, esr 0x{:x}", exception_data_abort_access_is_write(), @@ -60,7 +75,7 @@ pub fn data_abort_handler(ctx: &mut ContextFrame) { emu_ctx.address, exception_iss(), emu_ctx.reg, - ctx.get_gpr(emu_ctx.reg), + ctx.gpr(emu_ctx.reg), exception_esr() ); panic!( @@ -68,13 +83,15 @@ pub fn data_abort_handler(ctx: &mut ContextFrame) { emu_ctx.address, elr ); } - */ + let val = elr + exception_next_instruction_step(); ctx.set_exception_pc(val); + + current_cpu().clear_ctx(); } -#[inline(never)] -pub fn hvc_handler(ctx: &mut 
ContextFrame) { +#[no_mangle] +pub extern "C" fn hvc_handler(ctx: &mut ContextFrame) { let x0 = ctx.gpr(0); let x1 = ctx.gpr(1); let x2 = ctx.gpr(2); @@ -87,6 +104,37 @@ pub fn hvc_handler(ctx: &mut ContextFrame) { let hvc_type = (mode >> 8) & 0xff; let event = mode & 0xff; + current_cpu().set_ctx(ctx); +/* + let misr = GICH.get_misr(); + let hcr = GICH.get_hcr(); + let gicv_ctlr = GICV.get_ctlr(); + let eisr0 = GICH.get_eisr_by_idx(0); + let lr0 = GICH.get_lr_by_idx(0); + let gicc_ctl = GICC.get_ctlr(); + debug!("[hvc_handler] why!!!!!!!!!!!!!!! misr: {:#x} eisr0:{:#x} lr0:{:#x} hcr:{:#x} gicv_ctlr:{:#x} gicc_ctl:{:#x}", misr, eisr0,lr0, hcr, gicv_ctlr, gicc_ctl); + + debug!("this is x0: {}", x0); + let prio25 = GICD.lock().get_priority(25); + let prio27 = GICD.lock().get_priority(27); + let state = GICD.lock().get_enable(25); + debug!("[hvc_handler] 25 enabled:{} prio25 {:#x} prio27 {:#x}", state, prio25, prio27); + //axhal::gicc_clear_current_irq(x0, false); + // axhal::gicv_clear_current_irq(x0, false); +*/ + ctx.set_gpr(HVC_RETURN_REG, 0); + + // handle_virtual_interrupt(79, 0); +/* let misr = GICH.get_misr(); + let hcr = GICH.get_hcr(); + let gicv_eoi = GICV.get_ctlr(); + let gicv_iar = GICV.get_iar(); + let eisr0 = GICH.get_eisr_by_idx(0); + let lr0 = GICH.get_lr_by_idx(0); + let gicc_iar = GICC.get_iar(); + debug!("after inject misr: {:#x} eisr0:{:#x} lr0:{:#x} hcr:{:#x} gicv_ctlr:{:#x} gicv_iar:{:#x} gicc_iar:{:#x}", misr, eisr0,lr0, hcr, gicv_eoi, gicv_iar, gicc_iar); +*/ + /* match hvc_guest_handler(hvc_type, event, x0, x1, x2, x3, x4, x5, x6) { Ok(val) => { ctx.set_gpr(HVC_RETURN_REG, val); @@ -96,6 +144,7 @@ pub fn hvc_handler(ctx: &mut ContextFrame) { ctx.set_gpr(HVC_RETURN_REG, usize::MAX); } } + if hvc_type==HVC_SYS && event== HVC_SYS_BOOT { unsafe { let regs: &mut VmCpuRegisters = core::mem::transmute(x1); // x1 is the vm regs context @@ -111,14 +160,19 @@ pub fn hvc_handler(ctx: &mut ContextFrame) { ctx.spsr = regs.guest_trap_context_regs.spsr; } } + */ + current_cpu().clear_ctx(); } -pub fn smc_handler(ctx: &mut ContextFrame) { +#[no_mangle] +pub extern "C" fn smc_handler(ctx: &mut ContextFrame) { let fid = ctx.gpr(0); let x1 = ctx.gpr(1); let x2 = ctx.gpr(2); let x3 = ctx.gpr(3); + current_cpu().set_ctx(ctx); + match smc_guest_handler(fid, x1, x2, x3) { Ok(val) => { ctx.set_gpr(SMC_RETURN_REG, val); @@ -132,4 +186,6 @@ pub fn smc_handler(ctx: &mut ContextFrame) { let elr = ctx.exception_pc(); let val = elr + exception_next_instruction_step(); ctx.set_exception_pc(val); + + current_cpu().clear_ctx(); } diff --git a/modules/axruntime/src/hv/aarch64_kernel/vgic.rs b/modules/axruntime/src/hv/aarch64_kernel/vgic.rs new file mode 100644 index 0000000000..7c803f831a --- /dev/null +++ b/modules/axruntime/src/hv/aarch64_kernel/vgic.rs @@ -0,0 +1,1674 @@ +use arm_gic::{ + GIC_CONFIG_BITS, GIC_PRIO_BITS, GIC_PRIVATE_INT_NUM, GIC_SGIS_NUM, GIC_TARGETS_MAX, + GIC_TARGET_BITS, +}; +use axhal::{gic_is_priv, gic_lrs, GICD, GICH, GICV}; +use hypercraft::arch::emu::EmuContext; +use hypercraft::arch::utils::*; +use hypercraft::arch::vgic::{Vgic, VgicInt, VgicIntInner}; +use hypercraft::{IrqState, VCpu, VM}; + +use crate::{GuestPageTable, HyperCraftHalImpl}; +use hypercraft::{GuestPageTableTrait, HyperCraftHal}; + +use super::vm_array::get_vm; +use super::{active_vm, current_cpu}; + +fn remove_int_list(vgic: &Vgic, vcpu: VCpu, interrupt: VgicInt, is_pend: bool) { + let mut cpu_priv = vgic.cpu_priv.lock(); + let vcpu_id = vcpu.vcpu_id; + let int_id = interrupt.id(); + if is_pend { + if 
!interrupt.in_pend() { + return; + } + for i in 0..cpu_priv[vcpu_id].pend_list.len() { + if cpu_priv[vcpu_id].pend_list[i].id() == int_id { + // if int_id == 297 { + // println!("remove int {} in pend list[{}]", int_id, i); + // } + cpu_priv[vcpu_id].pend_list.remove(i); + break; + } + } + interrupt.set_in_pend_state(false); + } else { + if !interrupt.in_act() { + return; + } + for i in 0..cpu_priv[vcpu_id].act_list.len() { + if cpu_priv[vcpu_id].act_list[i].id() == int_id { + cpu_priv[vcpu_id].act_list.remove(i); + break; + } + } + interrupt.set_in_act_state(false); + }; +} + +fn add_int_list(vgic: &Vgic, vcpu: VCpu, interrupt: VgicInt, is_pend: bool) { + let mut cpu_priv = vgic.cpu_priv.lock(); + let vcpu_id = vcpu.vcpu_id; + if is_pend { + interrupt.set_in_pend_state(true); + cpu_priv[vcpu_id].pend_list.push_back(interrupt); + } else { + interrupt.set_in_act_state(true); + cpu_priv[vcpu_id].act_list.push_back(interrupt); + } +} + +/// update vgic int list according to the coming interrupt state +fn update_int_list(vgic: &Vgic, vcpu: VCpu, interrupt: VgicInt) { + let state = interrupt.state().to_num(); + + // if state is pending and the interrupt is not pending, add it to the pending list + // bool means is_pend + if state & 1 != 0 && !interrupt.in_pend() { + add_int_list(vgic, vcpu.clone(), interrupt.clone(), true); + } else if state & 1 == 0 { + remove_int_list(vgic, vcpu.clone(), interrupt.clone(), true); + } + // if state is active and the interrupt is not active, add it to the active list + if state & 2 != 0 && !interrupt.in_act() { + add_int_list(vgic, vcpu.clone(), interrupt.clone(), false); + } else if state & 2 == 0 { + remove_int_list(vgic, vcpu.clone(), interrupt.clone(), false); + } + + if interrupt.id() < GIC_SGIS_NUM as u16 { + if vgic.cpu_priv_sgis_pend(vcpu.vcpu_id, interrupt.id() as usize) != 0 + && !interrupt.in_pend() + { + add_int_list(vgic, vcpu, interrupt, true); + } + } +} + +/// Get vgic int list head +fn int_list_head(vgic: &Vgic, vcpu: VCpu, is_pend: bool) -> Option> { + let cpu_priv = vgic.cpu_priv.lock(); + let vcpu_id = vcpu.vcpu_id; + if is_pend { + if cpu_priv[vcpu_id].pend_list.is_empty() { + None + } else { + Some(cpu_priv[vcpu_id].pend_list[0].clone()) + } + } else { + if cpu_priv[vcpu_id].act_list.is_empty() { + None + } else { + Some(cpu_priv[vcpu_id].act_list[0].clone()) + } + } +} + +fn get_int(vgic: &Vgic, vcpu: VCpu, int_id: usize) -> Option> { + if int_id < GIC_PRIVATE_INT_NUM { + let vcpu_id = vcpu.vcpu_id; + return Some(vgic.cpu_priv_interrupt(vcpu_id, int_id)); + } else if int_id >= GIC_PRIVATE_INT_NUM && int_id < 1024 { + // hard code for max irq + return Some(vgic.vgicd_interrupt(int_id - GIC_PRIVATE_INT_NUM)); + } + return None; +} + +// if the interrupt is invalid, just remove it, otherwise, according to the interrupt state, update the vgic pending or active list +fn remove_lr(vgic: &Vgic, vcpu: VCpu, interrupt: VgicInt) -> bool { + if !vgic_owns(vcpu.clone(), interrupt.clone()) { + return false; + } + let int_lr_idx = interrupt.lr(); + let int_id = interrupt.id() as usize; + let vcpu_id = vcpu.vcpu_id; + + if !interrupt.in_lr() { + return false; + } + + // set gich_lr[int_lr_idx] to 0, save the origin lr value to lv_val + let mut lr_val = 0; + if let Some(lr) = gich_get_lr(interrupt.clone()) { + GICH.set_lr_by_idx(int_lr_idx as usize, 0); + lr_val = lr; + } + // set this interrupt not in lr + interrupt.set_in_lr(false); + + let lr_state = (lr_val >> 28) & 0b11; + // if the origin lr value state is not invalid(pending or active), reset 
interrupt state + if lr_state != 0 { + interrupt.set_state(IrqState::num_to_state(lr_state as usize)); + if int_id < GIC_SGIS_NUM { + // if interrupt is in active state, add it to cpu_priv active list + if interrupt.state().to_num() & 2 != 0 { + vgic.set_cpu_priv_sgis_act(vcpu_id, int_id, ((lr_val >> 10) & 0b111) as u8); + } + // if interrupt is in pending state, add it to cpu_priv pending list + // ((lr_val >> 10) & 0b111) is the target cpu id + else if interrupt.state().to_num() & 1 != 0 { + let pend = vgic.cpu_priv_sgis_pend(vcpu_id, int_id); + vgic.set_cpu_priv_sgis_pend( + vcpu_id, + int_id, + pend | (1 << ((lr_val >> 10) & 0b111) as u8), + ); + } + } + // add this interrupt to the corresponding list + update_int_list(vgic, vcpu, interrupt.clone()); + + // if int is pending, signal a maintenance interrupt + if (interrupt.state().to_num() & 1 != 0) && interrupt.enabled() { + // debug!("remove_lr: interrupt_state {}", interrupt.state().to_num()); + let hcr = GICH.get_hcr(); + GICH.set_hcr(hcr | (1 << 3)); + } + return true; + } + false +} + +fn add_lr(vgic: &Vgic, vcpu: VCpu, interrupt: VgicInt) -> bool { + // Check if the interrupt is enabled and not already in the List Register. + // If either condition is not met, return false. + debug!("[add_lr] irq:{}, target {}", interrupt.id(), interrupt.targets()); + if !interrupt.enabled() || interrupt.in_lr() { + return false; + } + + // Get the number of List Registers. + let gic_lrs = gic_lrs(); + debug!("[add_lr] this is gic_lr number {}", gic_lrs); + let mut lr_idx = None; + // Find an empty slot in the List Registers. + // elrsr: The corresponding List register does not contain a valid interrupt. + + let elrsr0 = GICH.get_elrsr_by_idx(0); + debug!("[add_lr] elrsr0: {:#x}", elrsr0); + + for i in 0..gic_lrs { + if (GICH.get_elrsr_by_idx(i / 32) & (1 << (i % 32))) != 0 { + lr_idx = Some(i); + break; + } + } + debug!("[add_lr] this is lr_idx {:?}", lr_idx); + // If no empty slot was found, we need to spill an existing interrupt. + if lr_idx.is_none() { + // Initialize variables to keep track of the interrupt with the lowest priority. + let mut pend_found = 0; + let mut act_found = 0; + let mut min_prio_act = 0; + let mut min_prio_pend = 0; + let mut act_ind = None; + let mut pend_ind = None; + // Iterate over all List Registers to find the interrupt with the lowest priority. + for i in 0..gic_lrs { + let lr = GICH.get_lr_by_idx(i); + // [27:23] Priority The priority of this interrupt. + let lr_prio = (lr >> 23) & 0b11111; + // [29:28] State The state of the interrupt. + let lr_state = (lr >> 28) & 0b11; + // Check if the interrupt is active. + if lr_state & 2 != 0 { + if lr_prio > min_prio_act { + min_prio_act = lr_prio; + act_ind = Some(i); + } + act_found += 1; + } + // Check if the interrupt is pending. + else if lr_state & 1 != 0 { + if lr_prio > min_prio_pend { + min_prio_pend = lr_prio; + pend_ind = Some(i); + } + pend_found += 1; + } + } + // Choose the interrupt to spill based on the number of active and pending interrupts. First spill pending interrupts, then active interrupts. + if pend_found > 1 { + lr_idx = pend_ind; + } else if act_found > 1 { + lr_idx = act_ind; + } + // If an interrupt was chosen to be spilled, remove it from the List Register and yield its ownership. 
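+ // Spilling frees a List Register slot: remove_lr() writes the victim's state back
+ // into the vGIC software pending/active lists (and requests a maintenance interrupt
+ // if it is still pending), then vgic_int_yield_owner() releases ownership so the
+ // slot can be reused for the interrupt being added.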
+ if let Some(idx) = lr_idx { + let spilled_int = get_int( + vgic, + vcpu.clone(), + GICH.get_lr_by_idx(idx) as usize & 0b11_1111_1111, + ) + .unwrap(); + // If the interrupt that we're going to spill is not the same as the interrupt we're trying to add, + // lock the spilled interrupt to prevent other threads from modifying it while we're working with it. + let spilled_int_lock; + if spilled_int.id() != interrupt.id() { + spilled_int_lock = spilled_int.lock.lock(); + } + remove_lr(vgic, vcpu.clone(), spilled_int.clone()); + vgic_int_yield_owner(vcpu.clone(), spilled_int.clone()); + // if spilled_int.id() != interrupt.id() { + // drop(spilled_int_lock); + // } + } + } + + // If an empty slot was found or an interrupt was spilled, write the new interrupt to the List Register. + // Otherwise, if the interrupt is pending, enable maintenance interrupts. + match lr_idx { + Some(idx) => { + write_lr(vgic, vcpu, interrupt, idx); + return true; + } + None => { + // turn on maintenance interrupts + if vgic_get_state(interrupt) & 1 != 0 { + let hcr = GICH.get_hcr(); + GICH.set_hcr(hcr | (1 << 3)); + } + } + } + + false +} + +fn write_lr(vgic: &Vgic, vcpu: VCpu, interrupt: VgicInt, lr_idx: usize) { + + // Get the ID and priority of the vCPU and the interrupt. + let vcpu_id = vcpu.vcpu_id; + let int_id = interrupt.id() as usize; + let int_prio = interrupt.get_priority(); + debug!("write lr: lr_idx {} vcpu_id:{}, int_id:{}, int_prio:{}", lr_idx, vcpu_id, int_id, int_prio); + // Get the ID of the interrupt that is currently in the List Register. + let prev_int_id = vgic.cpu_priv_curr_lrs(vcpu_id, lr_idx) as usize; + debug!("write lr: prev_int_id {}", prev_int_id); + // If the current interrupt is not the same as the interrupt we're trying to add, + // we need to remove the current interrupt from the List Register. + // This may happen if there is no empty slot in the List Registers and we need to spill an existing interrupt. + if prev_int_id != int_id { + let prev_interrupt_option = get_int(vgic, vcpu.clone(), prev_int_id); + if let Some(prev_interrupt) = prev_interrupt_option { + let prev_interrupt_lock = prev_interrupt.lock.lock(); + if vgic_owns(vcpu.clone(), prev_interrupt.clone()) { + if prev_interrupt.in_lr() && prev_interrupt.lr() == lr_idx as u16 { + prev_interrupt.set_in_lr(false); + let prev_id = prev_interrupt.id() as usize; + if !gic_is_priv(prev_id) { + vgic_int_yield_owner(vcpu.clone(), prev_interrupt.clone()); + } + } + } + drop(prev_interrupt_lock); + } + } + + // Get the state of the interrupt and initialize the List Register value. + let state = vgic_get_state(interrupt.clone()); + debug!("write lr: interrupt state {}", state); + let mut lr = (int_id & 0b11_1111_1111) | (((int_prio as usize >> 3) & 0b1_1111) << 23); + + // If the interrupt is a hardware interrupt, set the appropriate bits in the List Register. + if vgic_int_is_hw(interrupt.clone()) { + debug!("write lr: this is hw interrupt"); + // [31] HW Indicates whether this virtual interrupt is a hardware interrupt meaning that it corresponds to a physical interrupt. + lr |= 1 << 31; + // When GICH_LR.HW is set to 1, this field indicates the physical interrupt ID that the hypervisor forwards to the Distributor. + lr |= (0b11_1111_1111 & int_id) << 10; + // 0b11: pending and active. 
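+ // GICH_LR layout assembled below (GICv2): [31] HW, [29:28] state, [27:23] priority,
+ // [19:10] physical ID when HW == 1, [9:0] virtual ID.
+ // A hardware interrupt is never written as pending-and-active (0b11); that software
+ // state is downgraded to active only (0b10).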
+ if state == 3 { + lr |= (2 & 0b11) << 28; + } else { + lr |= (state & 0b11) << 28; + } + let gicd = GICD.lock(); + if gicd.get_state(int_id) != 2 { + gicd.set_state(int_id, 2, current_cpu().cpu_id); + } + } + // If the interrupt is a software-generated interrupt (SGI), set the appropriate bits in the List Register. + else if int_id < GIC_SGIS_NUM { + // active state + if (state & 2) != 0 { + // ((vgic.cpu_priv_sgis_act(vcpu_id, int_id) as usize) << 10) & (0b111 << 10): cpu id + lr |= ((vgic.cpu_priv_sgis_act(vcpu_id, int_id) as usize) << 10) & (0b111 << 10); + // set active. + lr |= (2 & 0b11) << 28; + } + // not active + else { + let mut idx = GIC_TARGETS_MAX - 1; + // Loop through the targets of the SGI find target cpu id + while idx as isize >= 0 { + if (vgic.cpu_priv_sgis_pend(vcpu_id, int_id) & (1 << idx)) != 0 { + lr |= (idx & 0b111) << 10; + let pend = vgic.cpu_priv_sgis_pend(vcpu_id, int_id); + // clear the cpu idx corresponding pending bit + vgic.set_cpu_priv_sgis_pend(vcpu_id, int_id, pend & !(1 << idx)); + lr |= (1 & 0b11) << 28; + break; + } + idx -= 1; + } + } + // [19] EOI Indicates whether this interrupt triggers an EOI maintenance interrupt, + // 1: A maintenance interrupt is asserted to signal EOI when the interrupt state is invalid, which typically occurs when the interrupt is deactivated. + if vgic.cpu_priv_sgis_pend(vcpu_id, int_id) != 0 { + lr |= 1 << 19; + } + } else { + if !gic_is_priv(int_id) && !vgic_int_is_hw(interrupt.clone()) { + lr |= 1 << 19; + } + + lr |= (state & 0b11) << 28; + } + + // Set the state of the interrupt to inactive, mark it as being in the List Register, and set the List Register index in the interrupt. + interrupt.set_state(IrqState::IrqSInactive); + interrupt.set_in_lr(true); + interrupt.set_lr(lr_idx as u16); + vgic.set_cpu_priv_curr_lrs(vcpu_id, lr_idx, int_id as u16); + debug!("write lr: lr value {:#x}", lr); + GICH.set_lr_by_idx(lr_idx, lr as u32); + + update_int_list(vgic, vcpu, interrupt); + debug!("write lr: end"); +} + +fn route(vgic: &Vgic, vcpu: VCpu, interrupt: VgicInt) { + debug!("[route]"); + let cpu_id = current_cpu().cpu_id; + if let IrqState::IrqSInactive = interrupt.state() { + return; + } + + if !interrupt.enabled() { + return; + } + + let int_targets = interrupt.targets(); + // not consider ipi in multi core + debug!("route: int_targets {:#x}, irq: {}", int_targets, interrupt.id()); + add_lr(vgic, vcpu.clone(), interrupt.clone()); + /*if (int_targets & (1 << cpu_id)) != 0 { + // debug!("vm{} route addr lr for int {}", vcpu.vm_id(), interrupt.id()); + add_lr(vgic, vcpu.clone(), interrupt.clone()); + } + + if !interrupt.in_lr() && (int_targets & !(1 << cpu_id)) != 0 { + let vcpu_vm_id = vcpu.vm_id; + + let ipi_msg = IpiInitcMessage { + event: InitcEvent::VgicdRoute, + vm_id: vcpu_vm_id, + int_id: interrupt.id(), + val: 0, + }; + vgic_int_yield_owner(vcpu, interrupt); + ipi_intra_broadcast_msg(active_vm().unwrap(), IpiType::IpiTIntc, IpiInnerMsg::Initc(ipi_msg)); + } + */ +} + +fn set_enable(vgic: &Vgic, vcpu: VCpu, int_id: usize, en: bool) { + if int_id < GIC_SGIS_NUM { + return; + } + match get_int(vgic, vcpu.clone(), int_id) { + Some(interrupt) => { + let interrupt_lock = interrupt.lock.lock(); + if interrupt.enabled() ^ en { + interrupt.set_enabled(en); + if !interrupt.enabled() { + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + } else { + route(vgic, vcpu.clone(), interrupt.clone()); + } + if interrupt.hw() { + GICD.lock().set_enable(interrupt.id() as usize, en); + } + } + vgic_int_yield_owner(vcpu, 
interrupt.clone()); + /* + if vgic_int_is_owner(vcpu.clone(), interrupt.clone()) { + if interrupt.enabled() ^ en { + interrupt.set_enabled(en); + if !interrupt.enabled() { + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + } else { + route(vgic, vcpu.clone(), interrupt.clone()); + } + if interrupt.hw() { + GICD.set_enable(interrupt.id() as usize, en); + } + } + vgic_int_yield_owner(vcpu, interrupt.clone()); + } else { + let int_phys_id = interrupt.owner_phys_id().unwrap(); + let vcpu_vm_id = vcpu.vm_id(); + let ipi_msg = IpiInitcMessage { + event: InitcEvent::VgicdSetEn, + vm_id: vcpu_vm_id, + int_id: interrupt.id(), + val: en as u8, + }; + if !ipi_send_msg(int_phys_id, IpiType::IpiTIntc, IpiInnerMsg::Initc(ipi_msg)) { + debug!( + "vgicd_set_enable: Failed to send ipi message, target {} type {}", + int_phys_id, 0 + ); + } + } + */ + drop(interrupt_lock); + } + None => { + debug!("vgicd_set_enable: interrupt {} is illegal", int_id); + return; + } + } +} + +fn get_enable(vgic: &Vgic, vcpu: VCpu, int_id: usize) -> bool { + get_int(vgic, vcpu, int_id).unwrap().enabled() +} + +fn set_pend(vgic: &Vgic, vcpu: VCpu, int_id: usize, pend: bool) { + // TODO: sgi_get_pend ? + if bit_extract(int_id, 0, 10) < GIC_SGIS_NUM { + set_enable(vgic, vcpu, int_id, pend); + return; + } + + let interrupt_option = get_int(vgic, vcpu.clone(), bit_extract(int_id, 0, 10)); + + if let Some(interrupt) = interrupt_option { + let interrupt_lock = interrupt.lock.lock(); + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + + let state = interrupt.state().to_num(); + // set the state to right value + if pend && ((state & 1) == 0) { + interrupt.set_state(IrqState::num_to_state(state | 1)); + } else if !pend && (state & 1) != 0 { + interrupt.set_state(IrqState::num_to_state(state & !1)); + } + update_int_list(vgic, vcpu.clone(), interrupt.clone()); + + let state = interrupt.state().to_num(); + if interrupt.hw() { + let vgic_int_id = interrupt.id() as usize; + GICD.lock().set_state( + vgic_int_id, + if state == 1 { 2 } else { state }, + current_cpu().cpu_id, + ) + } + route(vgic, vcpu.clone(), interrupt.clone()); + vgic_int_yield_owner(vcpu, interrupt.clone()); + drop(interrupt_lock); + /* + if vgic_int_is_owner(vcpu.clone(), interrupt.clone()) { + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + + let state = interrupt.state().to_num(); + if pend && ((state & 1) == 0) { + interrupt.set_state(IrqState::num_to_state(state | 1)); + } else if !pend && (state & 1) != 0 { + interrupt.set_state(IrqState::num_to_state(state & !1)); + } + update_int_list(vgic, vcpu.clone(), interrupt.clone()); + + let state = interrupt.state().to_num(); + if interrupt.hw() { + let vgic_int_id = interrupt.id() as usize; + GICD.set_state(vgic_int_id, if state == 1 { 2 } else { state }) + } + route(vgic, vcpu.clone(), interrupt.clone()); + vgic_int_yield_owner(vcpu, interrupt.clone()); + drop(interrupt_lock); + } else { + let vm_id = vcpu.vm_id(); + + let m = IpiInitcMessage { + event: InitcEvent::VgicdSetPend, + vm_id, + int_id: interrupt.id(), + val: pend as u8, + }; + match interrupt.owner() { + Some(owner) => { + let phys_id = owner.phys_id(); + + drop(interrupt_lock); + if !ipi_send_msg(phys_id, IpiType::IpiTIntc, IpiInnerMsg::Initc(m)) { + debug!( + "vgicd_set_pend: Failed to send ipi message, target {} type {}", + phys_id, 0 + ); + } + } + None => { + panic!( + "set_pend: Core {} int {} has no owner", + current_cpu().id, + interrupt.id() + ); + } + } + } + */ + } +} + +fn set_active(vgic: &Vgic, vcpu: VCpu, int_id: usize, act: bool) { + let 
interrupt_option = get_int(vgic, vcpu.clone(), bit_extract(int_id, 0, 10)); + if let Some(interrupt) = interrupt_option { + let interrupt_lock = interrupt.lock.lock(); + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + let state = interrupt.state().to_num(); + if act && ((state & 2) == 0) { + interrupt.set_state(IrqState::num_to_state(state | 2)); + } else if !act && (state & 2) != 0 { + interrupt.set_state(IrqState::num_to_state(state & !2)); + } + update_int_list(vgic, vcpu.clone(), interrupt.clone()); + + let state = interrupt.state().to_num(); + if interrupt.hw() { + let vgic_int_id = interrupt.id() as usize; + GICD.lock().set_state( + vgic_int_id, + if state == 1 { 2 } else { state }, + current_cpu().cpu_id, + ) + } + route(vgic, vcpu.clone(), interrupt.clone()); + vgic_int_yield_owner(vcpu, interrupt.clone()); + /* + if vgic_int_is_owner(vcpu.clone(), interrupt.clone()) { + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + let state = interrupt.state().to_num(); + if act && ((state & 2) == 0) { + interrupt.set_state(IrqState::num_to_state(state | 2)); + } else if !act && (state & 2) != 0 { + interrupt.set_state(IrqState::num_to_state(state & !2)); + } + update_int_list(vgic, vcpu.clone(), interrupt.clone()); + + let state = interrupt.state().to_num(); + if interrupt.hw() { + let vgic_int_id = interrupt.id() as usize; + GICD.set_state(vgic_int_id, if state == 1 { 2 } else { state }) + } + route(vgic, vcpu.clone(), interrupt.clone()); + vgic_int_yield_owner(vcpu, interrupt.clone()); + } else { + let vm_id = vcpu.vm_id(); + + let m = IpiInitcMessage { + event: InitcEvent::VgicdSetPend, + vm_id, + int_id: interrupt.id(), + val: act as u8, + }; + let phys_id = interrupt.owner_phys_id().unwrap(); + if !ipi_send_msg(phys_id, IpiType::IpiTIntc, IpiInnerMsg::Initc(m)) { + debug!( + "vgicd_set_active: Failed to send ipi message, target {} type {}", + phys_id, 0 + ); + } + } + */ + drop(interrupt_lock); + } +} + +fn set_icfgr(vgic: &Vgic, vcpu: VCpu, int_id: usize, cfg: u8) { + let interrupt_option = get_int(vgic, vcpu.clone(), int_id); + if let Some(interrupt) = interrupt_option { + let interrupt_lock = interrupt.lock.lock(); + interrupt.set_cfg(cfg); + if interrupt.hw() { + GICD.lock().set_icfgr(interrupt.id() as usize, cfg); + } + vgic_int_yield_owner(vcpu, interrupt.clone()); + /* + if vgic_int_is_owner(vcpu.clone(), interrupt.clone()) { + interrupt.set_cfg(cfg); + if interrupt.hw() { + GICD.set_icfgr(interrupt.id() as usize, cfg); + } + vgic_int_yield_owner(vcpu, interrupt.clone()); + } else { + let m = IpiInitcMessage { + event: InitcEvent::VgicdSetCfg, + vm_id: vcpu.vm_id(), + int_id: interrupt.id(), + val: cfg, + }; + if !ipi_send_msg( + interrupt.owner_phys_id().unwrap(), + IpiType::IpiTIntc, + IpiInnerMsg::Initc(m), + ) { + debug!( + "set_icfgr: Failed to send ipi message, target {} type {}", + interrupt.owner_phys_id().unwrap(), + 0 + ); + } + } + */ + drop(interrupt_lock); + } else { + unimplemented!(); + } +} + +fn get_icfgr(vgic: &Vgic, vcpu: VCpu, int_id: usize) -> u8 { + let interrupt_option = get_int(vgic, vcpu, int_id); + if let Some(interrupt) = interrupt_option { + return interrupt.cfg(); + } else { + unimplemented!(); + } +} + +fn sgi_set_pend(vgic: &Vgic, vcpu: VCpu, int_id: usize, pend: bool) { + // let begin = time_current_us(); + if bit_extract(int_id, 0, 10) > GIC_SGIS_NUM { + return; + } + + let interrupt_option = get_int(vgic, vcpu.clone(), bit_extract(int_id, 0, 10)); + let source = bit_extract(int_id, 10, 5); + + if let Some(interrupt) = interrupt_option { + 
let interrupt_lock = interrupt.lock.lock(); + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + let vcpu_id = vcpu.vcpu_id; + + let vgic_int_id = interrupt.id() as usize; + let pendstate = vgic.cpu_priv_sgis_pend(vcpu_id, vgic_int_id); + // let pendstate = cpu_priv[vcpu_id].sgis[vgic_int_id].pend; + let new_pendstate = if pend { + pendstate | (1 << source) as u8 + } else { + pendstate & !(1 << source) as u8 + }; + if (pendstate ^ new_pendstate) != 0 { + // cpu_priv[vcpu_id].sgis[vgic_int_id].pend = new_pendstate; + vgic.set_cpu_priv_sgis_pend(vcpu_id, vgic_int_id, new_pendstate); + let state = interrupt.state().to_num(); + if new_pendstate != 0 { + interrupt.set_state(IrqState::num_to_state(state | 1)); + } else { + interrupt.set_state(IrqState::num_to_state(state & !1)); + } + + update_int_list(vgic, vcpu.clone(), interrupt.clone()); + + // debug!("state {}", interrupt.state().to_num()); + match interrupt.state() { + IrqState::IrqSInactive => { + debug!("inactive"); + } + _ => { + add_lr(vgic, vcpu, interrupt.clone()); + } + } + } + drop(interrupt_lock); + } else { + debug!( + "sgi_set_pend: interrupt {} is None", + bit_extract(int_id, 0, 10) + ); + } + // let end = time_current_us(); + // debug!("sgi_set_pend[{}]", end - begin); +} + +fn set_priority(vgic: &Vgic, vcpu: VCpu, int_id: usize, mut prio: u8) { + let interrupt_option = get_int(vgic, vcpu.clone(), int_id); + prio &= 0xf0; // gic-400 only allows 4 priority bits in non-secure state + + if let Some(interrupt) = interrupt_option { + let interrupt_lock = interrupt.lock.lock(); + if interrupt.get_priority() != prio { + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + let prev_prio = interrupt.get_priority(); + interrupt.set_priority(prio); + if prio <= prev_prio { + route(vgic, vcpu.clone(), interrupt.clone()); + } + if interrupt.hw() { + GICD.lock().set_priority(interrupt.id() as usize, prio); + } + } + vgic_int_yield_owner(vcpu, interrupt.clone()); + /* + if vgic_int_is_owner(vcpu.clone(), interrupt.clone()) { + if interrupt.get_priority() != prio { + remove_lr(vgic, vcpu.clone(), interrupt.clone()); + let prev_prio = interrupt.get_priority(); + interrupt.set_priority(prio); + if prio <= prev_prio { + route(vgic, vcpu.clone(), interrupt.clone()); + } + if interrupt.hw() { + GICD.lock().set_priority(interrupt.id() as usize, prio); + } + } + vgic_int_yield_owner(vcpu, interrupt.clone()); + } else { + let vm_id = vcpu.vm_id; + + let m = IpiInitcMessage { + event: InitcEvent::VgicdSetPrio, + vm_id, + int_id: interrupt.id(), + val: prio, + }; + if !ipi_send_msg( + interrupt.owner_phys_id().unwrap(), + IpiType::IpiTIntc, + IpiInnerMsg::Initc(m), + ) { + debug!( + "set_priority: Failed to send ipi message, target {} type {}", + interrupt.owner_phys_id().unwrap(), + 0 + ); + } + } + */ + drop(interrupt_lock); + } +} + +fn get_priority(vgic: &Vgic, vcpu: VCpu, int_id: usize) -> u8 { + let interrupt_option = get_int(vgic, vcpu, int_id); + return interrupt_option.unwrap().get_priority(); +} + +fn set_target(vgic: &Vgic, vcpu: VCpu, int_id: usize, target: u8) { + let interrupt_option = get_int(vgic, vcpu.clone(), int_id); + if let Some(interrupt) = interrupt_option { + let interrupt_lock = interrupt.lock.lock(); + if interrupt.targets() != target { + interrupt.set_targets(target); + let mut ptrgt = 0; + for cpuid in 0..8 { + if bit_get(target as usize, cpuid) != 0 { + ptrgt = bit_set(ptrgt, cpuid) + } + } + if interrupt.hw() { + GICD.lock().set_target_cpu(interrupt.id() as usize, ptrgt as u8); + } + if vgic_get_state(interrupt.clone()) != 
0 { + route(vgic, vcpu.clone(), interrupt.clone()); + } + } + /* + if vgic_int_is_owner(vcpu.clone(), interrupt.clone()) { + if interrupt.targets() != target { + interrupt.set_targets(target); + let mut ptrgt = 0; + for cpuid in 0..8 { + if bit_get(target as usize, cpuid) != 0 { + ptrgt = bit_set(ptrgt, Platform::cpuid_to_cpuif(cpuid)) + } + } + if interrupt.hw() { + GICD.set_target(interrupt.id() as usize, ptrgt as u8); + } + if vgic_get_state(interrupt.clone()) != 0 { + route(vgic, vcpu.clone(), interrupt.clone()); + } + } + vgic_int_yield_owner(vcpu, interrupt.clone()); + } else { + let vm_id = vcpu.vm_id(); + let m = IpiInitcMessage { + event: InitcEvent::VgicdSetTrgt, + vm_id, + int_id: interrupt.id(), + val: target, + }; + if !ipi_send_msg( + interrupt.owner_phys_id().unwrap(), + IpiType::IpiTIntc, + IpiInnerMsg::Initc(m), + ) { + debug!( + "set_target: Failed to send ipi message, target {} type {}", + interrupt.owner_phys_id().unwrap(), + 0 + ); + } + } + */ + drop(interrupt_lock); + } +} + +fn get_target(vgic: &Vgic, vcpu: VCpu, int_id: usize) -> u8 { + let interrupt_option = get_int(vgic, vcpu, int_id); + return interrupt_option.unwrap().targets(); +} + +/// inject interrupt to vgic +pub fn vgic_inject(vgic: &Vgic, vcpu: VCpu, int_id: usize) { + debug!("[vgic_inject] Core {} inject int {} to vm{}", current_cpu().cpu_id, int_id, vcpu.vm_id); + let interrupt_option = get_int(vgic, vcpu.clone(), bit_extract(int_id, 0, 10)); + if let Some(interrupt) = interrupt_option { + if interrupt.hw() { + debug!("[vgic_inject] interrupt is hw"); + let interrupt_lock = interrupt.lock.lock(); + interrupt.set_owner(vcpu.clone()); + interrupt.set_state(IrqState::IrqSPend); + update_int_list(vgic, vcpu.clone(), interrupt.clone()); + interrupt.set_in_lr(false); + route(vgic, vcpu, interrupt.clone()); + drop(interrupt_lock); + } else { + set_pend(vgic, vcpu, int_id, true); + } + } +} + +/// access emulated ctlr +pub fn emu_ctrl_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_ctrl_access"); + if emu_ctx.write { + let prev_ctlr = vgic.vgicd_ctlr(); + let idx = emu_ctx.reg; + vgic.set_vgicd_ctlr(current_cpu().get_gpr(idx) as u32 & 0x1); + // only one cpu for a vm, do not need ipi broadcast? 
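+ // Only bit[0] (the distributor enable) of GICD_CTLR is virtualized here. The block
+ // below, which would mirror that bit into GICH_HCR.En and broadcast the change to
+ // other cores over IPI, stays disabled under the current one-vCPU-per-VM assumption.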
+ /* + if prev_ctlr ^ vgic.vgicd_ctlr() != 0 { + let enable = vgic.vgicd_ctlr() != 0; + let hcr = GICH.get_hcr(); + if enable { + GICH.set_hcr(hcr | 1); + } else { + GICH.set_hcr(hcr & !1); + } + + let m = IpiInitcMessage { + event: InitcEvent::VgicdGichEn, + vm_id: active_vm_id(), + int_id: 0, + val: enable as u8, + }; + ipi_intra_broadcast_msg( + active_vm().unwrap(), + IpiType::IpiTIntc, + IpiInnerMsg::Initc(m), + ); + } + */ + } else { + let idx = emu_ctx.reg; + let val = vgic.vgicd_ctlr() as usize; + current_cpu().set_gpr(idx, val); + } +} + +/// access emulated typer +pub fn emu_typer_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_typer_access"); + if !emu_ctx.write { + let idx = emu_ctx.reg; + let val = vgic.vgicd_typer() as usize; + current_cpu().set_gpr(idx, val); + } else { + debug!("emu_typer_access: can't write to RO reg"); + } +} + +/// access emulated iidr +pub fn emu_iidr_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_iidr_access"); + if !emu_ctx.write { + let idx = emu_ctx.reg; + let val = vgic.vgicd_iidr() as usize; + current_cpu().set_gpr(idx, val); + } else { + debug!("emu_iidr_access: can't write to RO reg"); + } +} + +/// access emulated gicd enable group +pub fn emu_enabler_access(vgic: &Vgic, emu_ctx: &EmuContext, set: bool) { + debug!("[emu_enabler_access] this is emu_enabler_access emu_ctx: {:?}", emu_ctx); + // the offset of the required GICD_IxENABLER is (reg base + (4*n)) + // reg_idx is + let reg_idx = (emu_ctx.address & 0b111_1111) / 4; + let idx = emu_ctx.reg; + let mut val = if emu_ctx.write { + current_cpu().get_gpr(idx) + } else { + 0 + }; + debug!("[emu_enabler_access] this is write val:{:#x}", val); + // caculate the first interrupt in the th register + let first_int = reg_idx * 32; + let vm_id = active_vm().vm_id; + let vm = active_vm(); + let mut vm_has_interrupt_flag = false; + + for i in 0..32 { + if vm.has_interrupt(first_int + i) || vm.emu_has_interrupt(first_int + i) { + debug!("[emu_enabler_access] this is vm has interrupt: {}", first_int+i); + vm_has_interrupt_flag = true; + break; + } + } + if first_int >= 16 && !vm_has_interrupt_flag { + debug!( + "[emu_isenabler_access]: vm[{}] does not have interrupt {}", + vm_id, first_int + ); + return; + } + + if emu_ctx.write { + for i in 0..32 { + if bit_get(val, i) != 0 { + set_enable(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i, set); + debug!("[emu_enabler_access] set interrupt enable: {:#x} first_int: {:#x}", first_int + i, first_int); + } + } + } else { + for i in 0..32 { + if get_enable(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i) { + val |= 1 << i; + } + } + let idx = emu_ctx.reg; + current_cpu().set_gpr(idx, val); + } +} + +/// access emulated gicd isenable +pub fn emu_isenabler_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("[emu_isenabler_access] this is emu_isenabler_access"); + emu_enabler_access(vgic, emu_ctx, true); +} + +/// access emulated gicd icenabler +pub fn emu_icenabler_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_icenabler_access"); + emu_enabler_access(vgic, emu_ctx, false); +} + +/// access emulated gicd pend group +pub fn emu_pendr_access(vgic: &Vgic, emu_ctx: &EmuContext, set: bool) { + debug!("this is emu_pendr_access"); + // the offset of the required GICD_IxPENDR is (reg base + (4*n)) + // reg_idx is + let reg_idx = (emu_ctx.address & 0b1111111) / 4; + let idx = emu_ctx.reg; + let mut val = if emu_ctx.write { + current_cpu().get_gpr(idx) + } else { + 0 + }; + 
+ // caculate the first interrupt in the th register + let first_int = reg_idx * 32; + let vm_id = active_vm().vm_id; + let vm = active_vm(); + let mut vm_has_interrupt_flag = false; + + for i in 0..emu_ctx.width { + if vm.has_interrupt(first_int + i) || vm.emu_has_interrupt(first_int + i) { + vm_has_interrupt_flag = true; + break; + } + } + if first_int >= 16 && !vm_has_interrupt_flag { + warn!( + "emu_pendr_access: vm[{}] does not have interrupt {}", + vm_id, first_int + ); + return; + } + + if emu_ctx.write { + for i in 0..32 { + if bit_get(val, i) != 0 { + set_pend(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i, set); + } + } + } else { + for i in 0..32 { + match get_int(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i) { + Some(interrupt) => { + if vgic_get_state(interrupt.clone()) & 1 != 0 { + val |= 1 << i; + } + } + None => {} + } + } + let idx = emu_ctx.reg; + current_cpu().set_gpr(idx, val); + } +} + +/// access emulated gicd ispendr +pub fn emu_ispendr_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_ispendr_access"); + emu_pendr_access(vgic, emu_ctx, true); +} + +/// access emulated gicd icpendr +pub fn emu_icpendr_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_icpendr_access"); + emu_pendr_access(vgic, emu_ctx, false); +} + +/// access emulated gicd active group +pub fn emu_activer_access(vgic: &Vgic, emu_ctx: &EmuContext, set: bool) { + debug!("this is emu_activer_access"); + // the offset of the required GICD_IxACTIVER is (reg base + (4*n)) + // reg_idx is + let reg_idx = (emu_ctx.address & 0b111_1111) / 4; + let idx = emu_ctx.reg; + let mut val = if emu_ctx.write { + current_cpu().get_gpr(idx) + } else { + 0 + }; + + // caculate the first interrupt in the th register + let first_int = reg_idx * 32; + let vm_id = active_vm().vm_id; + let vm = active_vm(); + let mut vm_has_interrupt_flag = false; + + for i in 0..32 { + if vm.has_interrupt(first_int + i) || vm.emu_has_interrupt(first_int + i) { + vm_has_interrupt_flag = true; + break; + } + } + if first_int >= 16 && !vm_has_interrupt_flag { + warn!( + "emu_activer_access: vm[{}] does not have interrupt {}", + vm_id, first_int + ); + return; + } + + if emu_ctx.write { + for i in 0..32 { + if bit_get(val, i) != 0 { + set_active(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i, set); + } + } + } else { + for i in 0..32 { + match get_int(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i) { + Some(interrupt) => { + if vgic_get_state(interrupt.clone()) & 2 != 0 { + val |= 1 << i; + } + } + None => {} + } + } + let idx = emu_ctx.reg; + current_cpu().set_gpr(idx, val); + } +} + +/// access emulated gicd isactiver +pub fn emu_isactiver_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_isactiver_access"); + emu_activer_access(vgic, emu_ctx, true); +} + +/// access emulated gicd icactiver +pub fn emu_icactiver_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_icactiver_access"); + emu_activer_access(vgic, emu_ctx, false); +} + +/// access emulated gicd icfgr +pub fn emu_icfgr_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_icfgr_access"); + let first_int = (32 / GIC_CONFIG_BITS) * bit_extract(emu_ctx.address, 0, 9) / 4; + let vm_id = active_vm().vm_id; + let vm = active_vm(); + let mut vm_has_interrupt_flag = false; + + if emu_ctx.write { + for i in 0..emu_ctx.width * 8 { + if vm.has_interrupt(first_int + i) || vm.emu_has_interrupt(first_int + i) { + 
vm_has_interrupt_flag = true; + break; + } + } + if first_int >= 16 && !vm_has_interrupt_flag { + warn!( + "emu_icfgr_access: vm[{}] does not have interrupt {}", + vm_id, first_int + ); + return; + } + } + + if emu_ctx.write { + let idx = emu_ctx.reg; + let cfg = current_cpu().get_gpr(idx); + let mut irq = first_int; + let mut bit = 0; + while bit < emu_ctx.width * 8 { + set_icfgr(vgic, + current_cpu().get_active_vcpu().unwrap().clone(), + irq, + bit_extract(cfg as usize, bit, 2) as u8, + ); + bit += 2; + irq += 1; + } + } else { + let mut cfg = 0; + let mut irq = first_int; + let mut bit = 0; + while bit < emu_ctx.width * 8 { + cfg |= (get_icfgr(vgic, current_cpu().get_active_vcpu().unwrap().clone(), irq) as usize) << bit; + bit += 2; + irq += 1; + } + let idx = emu_ctx.reg; + let val = cfg; + current_cpu().set_gpr(idx, val); + } +} + +/// access emulated gicd sgi related registers +pub fn emu_sgiregs_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_sgiregs_access"); + let idx = emu_ctx.reg; + let val = if emu_ctx.write { + current_cpu().get_gpr(idx) + } else { + 0 + }; + let vm = active_vm(); + + // if the address is sgir (offset 0x0f00) + if bit_extract(emu_ctx.address, 0, 12) == bit_extract(usize::from(axhal::GICD_BASE + 0x0f00) + 0x0f00, 0, 12) { + if emu_ctx.write { + // TargetListFilter, bits [25:24] Determines how the Distributor processes the requested SGI. + let sgir_target_list_filter = bit_extract(val, 24, 2); + let mut trgtlist = 0; + match sgir_target_list_filter { + // 0b00 Forward the interrupt to the CPU interfaces specified by GICD_SGIR.CPUTargetList[23:16]. + 0 => { + trgtlist = vgic_target_translate(vm, bit_extract(val, 16, 8) as u32, true) + as usize; + } + // 0b01 Forward the interrupt to all CPU interfaces except that of the PE that requested the interrupt. + 1 => { + // todo: implement multi cpu for one vm + // trgtlist = active_vm_ncpu() & !(1 << current_cpu().id); + } + // 0b10 Forward the interrupt only to the CPU interface of the PE that requested the interrupt. + 2 => { + trgtlist = 1 << current_cpu().cpu_id; + } + // 0b11 Reserved. + 3 => { + return; + } + _ => {} + } + // GICv2 only support 8 pe. 
doto sgi between multi core + /* + for i in 0..8 { + if trgtlist & (1 << i) != 0 { + let m = IpiInitcMessage { + event: InitcEvent::VgicdSetPend, + vm_id: active_vm_id(), + int_id: (bit_extract(val, 0, 8) | (active_vcpu_id() << 10)) as u16, + val: true as u8, + }; + if !ipi_send_msg(i, IpiType::IpiTIntc, IpiInnerMsg::Initc(m)) { + debug!( + "emu_sgiregs_access: Failed to send ipi message, target {} type {}", + i, 0 + ); + } + } + } + */ + } + } else { + // TODO: CPENDSGIR and SPENDSGIR access + warn!("unimplemented: CPENDSGIR and SPENDSGIR access"); + } +} + +/// access emulated gicd ipriorityr +pub fn emu_ipriorityr_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("this is emu_ipriorityr_access"); + let idx = emu_ctx.reg; + let mut val = if emu_ctx.write { + current_cpu().get_gpr(idx) + } else { + 0 + }; + let first_int = (8 / GIC_PRIO_BITS) * bit_extract(emu_ctx.address, 0, 9); + let vm_id = active_vm().vm_id; + let vm = active_vm(); + let mut vm_has_interrupt_flag = false; + + if emu_ctx.write { + for i in 0..emu_ctx.width { + if vm.has_interrupt(first_int + i) || vm.emu_has_interrupt(first_int + i) { + vm_has_interrupt_flag = true; + break; + } + } + if first_int >= 16 && !vm_has_interrupt_flag { + warn!( + "emu_ipriorityr_access: vm[{}] does not have interrupt {}", + vm_id, first_int + ); + return; + } + } + + if emu_ctx.write { + for i in 0..emu_ctx.width { + set_priority(vgic, + current_cpu().get_active_vcpu().unwrap().clone(), + first_int + i, + bit_extract(val, GIC_PRIO_BITS * i, GIC_PRIO_BITS) as u8, + ); + } + } else { + for i in 0..emu_ctx.width { + val |= (get_priority(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i) + as usize) + << (GIC_PRIO_BITS * i); + } + let idx = emu_ctx.reg; + current_cpu().set_gpr(idx, val); + } +} + +/// access emulated gicd itargetr +pub fn emu_itargetr_access(vgic: &Vgic, emu_ctx: &EmuContext) { + debug!("[emu_itargetr_access] this is emu_itargetr_access"); + let idx = emu_ctx.reg; + let mut val = if emu_ctx.write { + current_cpu().get_gpr(idx) + } else { + 0 + }; + let first_int = (8 / GIC_TARGET_BITS) * bit_extract(emu_ctx.address, 0, 9); + + if emu_ctx.write { + val = vgic_target_translate(active_vm(), val as u32, true) as usize; + for i in 0..emu_ctx.width { + set_target(vgic, + current_cpu().get_active_vcpu().unwrap().clone(), + first_int + i, + bit_extract(val, GIC_TARGET_BITS * i, GIC_TARGET_BITS) as u8, + ); + } + } else { + // debug!("read, first_int {}, width {}", first_int, emu_ctx.width); + for i in 0..emu_ctx.width { + // debug!("{}", get_target(vgic, active_vcpu().unwrap(), first_int + i)); + val |= (get_target(vgic, current_cpu().get_active_vcpu().unwrap().clone(), first_int + i) as usize) + << (GIC_TARGET_BITS * i); + } + debug!("[emu_itargetr_access] after read val {}", val); + val = vgic_target_translate(active_vm(), val as u32, false) as usize; + let idx = emu_ctx.reg; + current_cpu().set_gpr(idx, val); + } + debug!("[emu_itargetr_access] in the end of emu_itargetr_access"); +} + +// End Of Interrupt maintenance interrupt asserted. 
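+// For every List Register flagged in GICH_EISR, the handler below clears the LR,
+// marks the virtual interrupt as no longer resident in an LR, and then either
+// re-queues it (SGIs, which may still have further pending sources) or releases
+// its ownership (all other interrupts).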
+pub fn handle_trapped_eoir(vgic: &Vgic, vcpu: VCpu) { + debug!("this is handle_trapped_eoir"); + let gic_lrs = gic_lrs(); + // find the first 1 in eisr0 and eisr1 + let mut lr_idx_option = bitmap_find_nth( + GICH.get_eisr_by_idx(0) as usize | ((GICH.get_eisr_by_idx(1) as usize) << 32), + 0, + gic_lrs, + 1, + true, + ); + // clear eoi lr circularly + while lr_idx_option.is_some() { + // clear corresponding lr + let lr_idx = lr_idx_option.unwrap(); + let lr_val = GICH.get_lr_by_idx(lr_idx) as usize; + GICH.set_lr_by_idx(lr_idx, 0); + + // clear interrupt state, set it not in lr + match get_int(vgic, vcpu.clone(), bit_extract(lr_val, 0, 10)) { + Some(interrupt) => { + let interrupt_lock = interrupt.lock.lock(); + interrupt.set_in_lr(false); + if (interrupt.id() as usize) < GIC_SGIS_NUM { + add_lr(vgic, vcpu.clone(), interrupt.clone()); + } else { + vgic_int_yield_owner(vcpu.clone(), interrupt.clone()); + } + drop(interrupt_lock); + } + None => { + unimplemented!(); + } + } + lr_idx_option = bitmap_find_nth( + GICH.get_eisr_by_idx(0) as usize | ((GICH.get_eisr_by_idx(1) as usize) << 32), + 0, + gic_lrs, + 1, + true, + ); + } +} + +// No Pending maintenance interrupt asserted. (no List register is in the pending state.) +pub fn refill_lrs(vgic: &Vgic, vcpu: VCpu) { + debug!("this is refill_lrs"); + let gic_lrs = gic_lrs(); + let mut has_pending = false; + + for i in 0..gic_lrs { + let lr = GICH.get_lr_by_idx(i) as usize; + // [29:28] state. 0b1: pending + if bit_extract(lr, 28, 2) & 1 != 0 { + has_pending = true; + } + } + + // Find the index of the first empty LR. + let mut lr_idx_option = bitmap_find_nth( + GICH.get_elrsr_by_idx(0) as usize | ((GICH.get_elrsr_by_idx(1) as usize) << 32), + 0, + gic_lrs, + 1, + true, + ); + + // refill empty LR until there is no more empty LR. + while lr_idx_option.is_some() { + let mut interrupt_opt: Option> = None; + let mut prev_pend = false; + // Get the first active and pending interrupts. + let active_head = int_list_head(vgic, vcpu.clone(), false); + let pend_head = int_list_head(vgic, vcpu.clone(), true); + // firstly add first active interrupt to lr if it is not in lr, otherwise add first pending interrupt to lr if it is not in lr + if has_pending { + match active_head { + Some(active_int) => { + if !active_int.in_lr() { + interrupt_opt = Some(active_int.clone()); + } + } + None => {} + } + } + if interrupt_opt.is_none() { + if let Some(pend_int) = pend_head { + if !pend_int.in_lr() { + interrupt_opt = Some(pend_int.clone()); + prev_pend = true; + } + } + } + + // If an interrupt has been selected... + match interrupt_opt { + Some(interrupt) => { + vgic_int_is_owner(vcpu.clone(), interrupt.clone()); + write_lr(vgic, vcpu.clone(), interrupt.clone(), lr_idx_option.unwrap()); + has_pending = has_pending || prev_pend; + } + None => { + // debug!("no int to refill"); + // If no interrupt has been selected, disable the LR refill maintenance. + let hcr = GICH.get_hcr(); + GICH.set_hcr(hcr & !(1 << 3)); + break; + } + } + + lr_idx_option = bitmap_find_nth( + GICH.get_elrsr_by_idx(0) as usize | ((GICH.get_elrsr_by_idx(1) as usize) << 32), + 0, + gic_lrs, + 1, + true, + ); + } + // debug!("end refill lrs"); +} + +// List Register Entry Not Present maintenance interrupt asserted. +// Generic Interrupt Controller (GIC) has attempted to access an interrupt that is not present in any of the List Registers (LRs). 
+pub fn eoir_highest_spilled_active(vgic: &Vgic, vcpu: VCpu) { + debug!("this is eoir_highest_spilled_active"); + // get pending interrupt + let interrupt = int_list_head(vgic, vcpu.clone(), false); + match interrupt { + Some(int) => { + int.lock.lock(); + // if interrupt does not have an owner, set it to current vcpu + vgic_int_is_owner(vcpu.clone(), int.clone()); + + let state = int.state().to_num(); + // if state is active, set it to inactive + int.set_state(IrqState::num_to_state(state & !2)); + update_int_list(vgic, vcpu.clone(), int.clone()); + + if vgic_int_is_hw(int.clone()) { + GICD.lock().set_active(int.id() as usize, false); + } else { + if int.state().to_num() & 1 != 0 { + add_lr(vgic, vcpu, int); + } + } + } + None => {} + } +} + +fn vgic_target_translate(vm:&mut VM, target: u32, v2p: bool) -> u32 { + let from = target.to_le_bytes(); + let mut result = 0; + let converted_values = from.map(|x| { + if v2p { + vm.vcpu_to_pcpu_mask(x as usize, 8) as u32 + } else { + vm.pcpu_to_vcpu_mask(x as usize, 8) as u32 + } + }); + // debug!("print converted_values{:?}", converted_values.len()); + for (idx, val) in converted_values + .iter() + .enumerate() + { + // debug!("idx {} val{}", idx, val); + result |= (*val as u32) << (8 * idx); + if idx >= 4 { + panic!("illegal idx, from len {}", from.len()); + } + } + result +} + +fn vgic_owns( + vcpu: VCpu, + interrupt: VgicInt, +) -> bool { + if gic_is_priv(interrupt.id() as usize) { + return true; + } + + let vcpu_id = vcpu.vcpu_id; + let pcpu_id = vcpu.pcpu_id; + match interrupt.owner() { + Some(owner) => { + let owner_vcpu_id = owner.vcpu_id; + let owner_pcpu_id = owner.pcpu_id; + return owner_vcpu_id == vcpu_id && owner_pcpu_id == pcpu_id; + } + None => return false, + } +} + +fn vgic_get_state(interrupt: VgicInt) -> usize { + let mut state = interrupt.state().to_num(); + + if interrupt.in_lr() && interrupt.owner_phys_id().unwrap() == current_cpu().cpu_id { + let lr_option = gich_get_lr(interrupt.clone()); + if let Some(lr_val) = lr_option { + state = lr_val as usize; + } + } + + if interrupt.id() as usize >= GIC_SGIS_NUM { + return state; + } + if interrupt.owner().is_none() { + return state; + } + + let vm = get_vm(interrupt.owner_vm_id().unwrap()).unwrap(); + let vgic = vm.vgic(); + let vcpu_id = interrupt.owner_id().unwrap(); + + if vgic.cpu_priv_sgis_pend(vcpu_id, interrupt.id() as usize) != 0 { + state |= 1; + } + + state +} + +fn vgic_int_yield_owner( + vcpu: VCpu, + interrupt: VgicInt, +) { + // the vcpu is not the interrupt owner + if !vgic_owns(vcpu, interrupt.clone()) { + return; + } + // the interrupt is cpu private int or it has already been in lr + if usize::from(interrupt.id()) < GIC_PRIVATE_INT_NUM || interrupt.in_lr() { + return; + } + // if this interrupt is not active, clear its owner. 
+ if vgic_get_state(interrupt.clone()) & 2 == 0 { + interrupt.clear_owner(); + } +} + +fn gich_get_lr(interrupt: VgicInt) -> Option { + let cpu_id = current_cpu().cpu_id; + let phys_id = interrupt.owner_phys_id().unwrap(); + + if !interrupt.in_lr() || phys_id != cpu_id { + return None; + } + + let lr_val = GICH.get_lr_by_idx(interrupt.lr() as usize); + // interrupt is in lr and is pending or active + if (lr_val & 0b11_1111_1111 == interrupt.id() as u32) && (lr_val >> 28 & 0b11 != 0) { + return Some(lr_val as u32); + } + return None; +} + +fn vgic_int_is_owner( + vcpu: VCpu, + interrupt: VgicInt, +) -> bool { + // if interrupt.owner().is_none() { + // interrupt.set_owner(vcpu.clone()); + // return true; + // } + let vcpu_id = vcpu.vcpu_id; + let vcpu_vm_id = vcpu.vm_id; + + match interrupt.owner() { + Some(owner) => { + let owner_vcpu_id = owner.vcpu_id(); + let owner_vm_id = owner.vm_id; + + return owner_vm_id == vcpu_vm_id && owner_vcpu_id == vcpu_id; + } + None => { + interrupt.set_owner(vcpu); + return true; + } + } + + // let owner_vcpu_id = interrupt.owner_id().unwrap(); + // let owner_vm_id = interrupt.owner_vm_id().unwrap(); + + return false; +} + +pub fn vgic_set_hw_int(vm:&mut VM, int_id: usize) { + if int_id < GIC_SGIS_NUM { + return; + } + /* + if !vm.has_vgic() { + return; + } + */ + let vgic = vm.vgic(); + + if int_id < GIC_PRIVATE_INT_NUM { + for i in 0..vm.vcpu_num() { + let interrupt_option = get_int(&vgic, vm.vcpu(i).unwrap().clone(), int_id); + match interrupt_option { + Some(interrupt) => { + let interrupt_lock = interrupt.lock.lock(); + interrupt.set_hw(true); + drop(interrupt_lock); + } + None => {} + } + } + } else { + let interrupt_option = get_int(&vgic, vm.vcpu(0).unwrap().clone(), int_id); + match interrupt_option { + Some(interrupt) => { + let interrupt_lock = interrupt.lock.lock(); + interrupt.set_hw(true); + drop(interrupt_lock); + } + None => {} + } + } +} + +fn vgic_int_is_hw(interrupt: VgicInt) -> bool { + interrupt.id() as usize >= GIC_SGIS_NUM && interrupt.hw() +} diff --git a/modules/axruntime/src/hv/aarch64_kernel/vm_array.rs b/modules/axruntime/src/hv/aarch64_kernel/vm_array.rs index d0c57310f5..1e27e9d5ed 100644 --- a/modules/axruntime/src/hv/aarch64_kernel/vm_array.rs +++ b/modules/axruntime/src/hv/aarch64_kernel/vm_array.rs @@ -1,21 +1,26 @@ extern crate alloc; - -use core::sync::atomic::{AtomicUsize, Ordering}; use alloc::vec::Vec; +use core::sync::atomic::{AtomicUsize, Ordering}; +use hypercraft::{PerCpu, VCpu, VM}; +use hypercraft::arch::emu::EmuDeviceType; use lazy_init::LazyInit; -use hypercraft::{VM, VCpu, PerCpu}; - -use crate::{HyperCraftHalImpl, GuestPageTable}; use axhal::cpu::this_cpu_id; + +use super::emu::emu_register_dev; +use super::emuintc_handler::{emu_intc_handler, emu_intc_init}; +use super::interrupt::interrupt_vm_register; +use crate::{GuestPageTable, HyperCraftHalImpl}; + const VCPU_CNT: usize = 2; static INITED_VCPUS: AtomicUsize = AtomicUsize::new(0); pub const VM_MAX_NUM: usize = 8; -pub static mut VM_ARRAY: LazyInit>>> = LazyInit::new(); +pub static mut VM_ARRAY: LazyInit>>> = + LazyInit::new(); /// Add vm vcpu by index -pub fn add_vm_vcpu(vm_id: usize, vcpu:VCpu) { +pub fn add_vm_vcpu(vm_id: usize, vcpu: VCpu) { if vm_id >= VM_MAX_NUM { panic!("vm_id {} out of bound", vm_id); } @@ -30,17 +35,71 @@ pub fn add_vm_vcpu(vm_id: usize, vcpu:VCpu) { } /// Init vm vcpu by index -pub fn init_vm_vcpu(vm_id: usize, vcpu_id: usize, entry:usize, x0:usize) { +pub fn init_vm_vcpu(vm_id: usize, vcpu_id: usize, entry: usize, x0: usize) { if 
vm_id >= VM_MAX_NUM { panic!("vm_id {} out of bound", vm_id); } unsafe { if let Some(vm_option) = VM_ARRAY.get_mut(vm_id) { if let Some(vm) = vm_option { + // init vcpu vm.init_vm_vcpu(vcpu_id, entry, x0); } } - } // debug!("finish init_vm_vcpu vm_id:{} vcpu {:?}", vm_id + } +} + +/// Init vm emulated device +pub fn init_vm_emu_device(vm_id: usize) { + if vm_id >= VM_MAX_NUM { + panic!("vm_id {} out of bound", vm_id); + } + unsafe { + if let Some(vm_option) = VM_ARRAY.get_mut(vm_id) { + if let Some(vm) = vm_option { + // init emu intc + let idx = 0; + vm.set_intc_dev_id(idx); + emu_register_dev( + EmuDeviceType::EmuDeviceTGicd, + vm.vm_id, + idx, + 0x8000000, // emu_dev.base_ipa, + 0x1000, // emu_dev.length, + emu_intc_handler, + ); + emu_intc_init(vm, idx); + } + } + } +} + +/// init vm passthrough device +pub fn init_vm_passthrough_device(vm_id: usize) { + if vm_id >= VM_MAX_NUM { + panic!("vm_id {} out of bound", vm_id); + } + unsafe { + if let Some(vm_option) = VM_ARRAY.get_mut(vm_id) { + if let Some(vm) = vm_option { + // hard code for qemu vm + let mut irqs = Vec::new(); + irqs.push(33); + irqs.push(27); // virtual timer + // irqs.push(30); + irqs.push(32 + 0x28); + irqs.push(32 + 0x29); + irqs.push(0x3e + 0x11); // what interrupt???? + for irq in irqs { + debug!("this is irq: {:#x}", irq); + if !interrupt_vm_register(vm, irq) { + warn!("vm{} register irq{} failed", vm_id, irq); + } + debug!("after register for vm irq: {:#x}", irq); + } + } + } + } } /// Add vm to vm array @@ -53,10 +112,9 @@ pub fn add_vm(vm_id: usize, vm: VM) { while VM_ARRAY.len() <= vm_id { VM_ARRAY.push(None); } - + VM_ARRAY[vm_id] = Some(vm); } - } /// Print vm info @@ -70,8 +128,20 @@ pub fn print_vm(vm_id: usize) { } } +/// Get vm by id +pub fn get_vm(vm_id: usize) -> Option<&'static mut VM> { + unsafe { + if let Some(vm_option) = VM_ARRAY.get_mut(vm_id) { + if let Some(vm) = vm_option { + return Some(vm); + } + } + } + None +} + /// Run vm by id -pub fn run_vm_vcpu(vm_id: usize, vcpu_id: usize) { +pub fn run_vm_vcpu(vm_id: usize, vcpu_id: usize) ->! { unsafe { debug!("current pcpu id: {} vcpu id:{}", this_cpu_id(), vcpu_id); if let Some(vm_option) = VM_ARRAY.get_mut(vm_id) { @@ -80,6 +150,7 @@ pub fn run_vm_vcpu(vm_id: usize, vcpu_id: usize) { } } } + loop{} } /// Checks if the initialization of the virtual machine array is successful. @@ -90,8 +161,8 @@ pub fn is_vcpu_init_ok() -> bool { } /// Checks if the primary virtual CPU (vCPU) is in a valid state. -/// +/// /// Returns `true` if the primary vCPU is in a valid state, `false` otherwise. 
pub fn is_vcpu_primary_ok() -> bool { INITED_VCPUS.load(Ordering::Acquire) == 1 -} \ No newline at end of file +} diff --git a/modules/axruntime/src/hv/mod.rs b/modules/axruntime/src/hv/mod.rs index 34315121e1..0bf28dd885 100644 --- a/modules/axruntime/src/hv/mod.rs +++ b/modules/axruntime/src/hv/mod.rs @@ -11,7 +11,7 @@ pub mod kernel; #[cfg(target_arch = "aarch64")] pub use kernel::{ VM_ARRAY, VM_MAX_NUM, - is_vcpu_init_ok, is_vcpu_primary_ok, init_vm_vcpu, add_vm, add_vm_vcpu, print_vm, run_vm_vcpu + is_vcpu_init_ok, is_vcpu_primary_ok, get_vm, init_vm_vcpu, init_vm_emu_device, init_vm_passthrough_device, add_vm, add_vm_vcpu, print_vm, run_vm_vcpu }; #[cfg(target_arch = "aarch64")] diff --git a/modules/axruntime/src/lib.rs b/modules/axruntime/src/lib.rs index e471c00c5a..54b19934d6 100644 --- a/modules/axruntime/src/lib.rs +++ b/modules/axruntime/src/lib.rs @@ -44,13 +44,19 @@ pub use hv::HyperCraftHalImpl; #[cfg(all(target_arch = "aarch64", feature = "hv"))] pub use hv::{ - VM_ARRAY, VM_MAX_NUM, - is_vcpu_init_ok, is_vcpu_primary_ok, init_vm_vcpu, add_vm, add_vm_vcpu, print_vm, run_vm_vcpu + VM_ARRAY, VM_MAX_NUM, + add_vm, add_vm_vcpu, get_vm, print_vm, + init_vm_vcpu, init_vm_emu_device, init_vm_passthrough_device, + is_vcpu_init_ok, is_vcpu_primary_ok, + run_vm_vcpu, }; #[cfg(all(target_arch = "aarch64", feature = "hv"))] -use axhal::IPI_IRQ_NUM; +use axhal::{IPI_IRQ_NUM, MAINTENANCE_IRQ_NUM}; #[cfg(all(target_arch = "aarch64", feature = "hv"))] -use crate::hv::kernel::{ipi_irq_handler, init_ipi}; +use crate::hv::kernel::{ + ipi_irq_handler, init_ipi, cpu_int_list_init, + gic_maintenance_handler +}; const LOGO: &str = r#" d8888 .d88888b. .d8888b. @@ -316,15 +322,50 @@ fn init_interrupt() { axtask::on_timer_tick(); }); - } + } - // Setup IPI interrupt handler for hv + // Setup interrupt handler for hv #[cfg(all(target_arch = "aarch64", feature = "hv"))] { + // IPI interrupt handler debug!("init ipi interrupt handler"); axhal::irq::register_handler(IPI_IRQ_NUM, ipi_irq_handler); - axhal::arch::hv::ipi::cpu_int_list_init(); + cpu_int_list_init(); init_ipi(); + + // Maintenance interrupt handler + debug!("init maintenance interrupt handler"); + axhal::irq::register_handler(axhal::MAINTENANCE_IRQ_NUM, gic_maintenance_handler); + + + axhal::GICD.lock().print_prio(); +/* + debug!("init hypervisor timer interrupt handler"); + use axhal::time::HYPERVISOR_TIMER_IRQ_NUM; + // Setup timer interrupt handler + const PERIODIC_INTERVAL_NANOS: u64 = + axhal::time::NANOS_PER_SEC / axconfig::TICKS_PER_SEC as u64; + + #[percpu::def_percpu] + static NEXT_DEADLINE: u64 = 0; + + fn update_timer() { + let now_ns = axhal::time::current_time_nanos(); + // Safety: we have disabled preemption in IRQ handler. + let mut deadline = unsafe { NEXT_DEADLINE.read_current_raw() }; + if now_ns >= deadline { + deadline = now_ns + PERIODIC_INTERVAL_NANOS; + } + unsafe { NEXT_DEADLINE.write_current_raw(deadline + PERIODIC_INTERVAL_NANOS) }; + axhal::time::set_oneshot_timer(deadline); + } + + axhal::irq::register_handler(HYPERVISOR_TIMER_IRQ_NUM, || { + update_timer(); + #[cfg(feature = "multitask")] + axtask::on_timer_tick(); + }); +*/ } // Enable IRQs before starting app diff --git a/modules/axruntime/src/mp.rs b/modules/axruntime/src/mp.rs index a570535eee..775836132b 100644 --- a/modules/axruntime/src/mp.rs +++ b/modules/axruntime/src/mp.rs @@ -63,6 +63,14 @@ pub extern "C" fn rust_main_secondary(cpu_id: usize) -> ! 
{ #[cfg(feature = "irq")] axhal::arch::enable_irqs(); + // run multi vm, this will not return + // todo: add more feature for multi vm + #[cfg(feature = "hv")] + /* + unsafe { + secondary_vm(cpu_id); + } + */ #[cfg(feature = "multitask")] { debug!("secondary CPU {} enter idle loop", cpu_id); @@ -75,7 +83,11 @@ pub extern "C" fn rust_main_secondary(cpu_id: usize) -> ! { #[cfg(feature = "hv")] { debug!("after wfi!!!!!!!!!!!"); - crate::hv::secondary_main_hv(cpu_id); + // crate::hv::secondary_main_hv(cpu_id); } } } + +extern "C" { + fn secondary_vm(cpu_id: usize) -> !; +} \ No newline at end of file diff --git a/modules/axruntime/src/trap.rs b/modules/axruntime/src/trap.rs index 31bdf91e3c..1836558d36 100644 --- a/modules/axruntime/src/trap.rs +++ b/modules/axruntime/src/trap.rs @@ -1,5 +1,8 @@ -// #[cfg(all(feature = "hv", target_arch = "aarch64"))] -// use crate::hv::aarch64_kernel::handle_virtual_interrupt; +#[cfg(all(feature = "hv", target_arch = "aarch64"))] +use hypercraft::arch::{ContextFrame, ContextFrameTrait}; +#[cfg(all(feature = "hv", target_arch = "aarch64"))] +use crate::hv::kernel::{handle_virtual_interrupt, current_cpu}; + struct TrapHandlerImpl; #[crate_interface::impl_interface] @@ -13,13 +16,24 @@ impl axhal::trap::TrapHandler for TrapHandlerImpl { } } #[cfg(all(feature = "hv", target_arch = "aarch64"))] - fn handle_irq_hv(irq_num: usize, src: usize) { - // if axhal::irq::irq_num_exist(irq_num) { + fn handle_irq_hv(irq_num: usize, src: usize, ctx: &mut ContextFrame) { + current_cpu().set_ctx(ctx); + if axhal::irq::irq_num_exist(irq_num) { let guard = kernel_guard::NoPreempt::new(); axhal::irq::dispatch_irq(irq_num); drop(guard); - // }else { // sgi - // handle_virtual_interrupt(irq_num, src); - // } + }else { + handle_virtual_interrupt(irq_num, src); + } + + debug!("[handle_irq_hv] before deactivate irq {} ", irq_num); + + if irq_num==axhal::IPI_IRQ_NUM || irq_num==axhal::MAINTENANCE_IRQ_NUM || irq_num==axhal::time::HYPERVISOR_TIMER_IRQ_NUM { + axhal::gicc_clear_current_irq(irq_num, true); + } else { + axhal::gicc_clear_current_irq(irq_num, false); + } + + current_cpu().clear_ctx(); } } diff --git a/ulib/libax/src/hv.rs b/ulib/libax/src/hv.rs index 83fbae90ed..7a3ba5fe91 100644 --- a/ulib/libax/src/hv.rs +++ b/ulib/libax/src/hv.rs @@ -15,6 +15,9 @@ pub use hypercraft::{HyperCallMsg, VmExitInfo, GuestPhysAddr, GuestVirtAddr, Hos pub use hypercraft::VcpusArray; #[cfg(target_arch = "aarch64")] pub use axruntime::{ - VM_ARRAY, VM_MAX_NUM, - is_vcpu_init_ok, is_vcpu_primary_ok, init_vm_vcpu, add_vm, add_vm_vcpu, print_vm, run_vm_vcpu + VM_ARRAY, VM_MAX_NUM, + add_vm, add_vm_vcpu, get_vm, print_vm, + init_vm_vcpu, init_vm_emu_device, init_vm_passthrough_device, + is_vcpu_init_ok, is_vcpu_primary_ok, + run_vm_vcpu, };
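
The re-exports above give an application everything it needs to assemble and start a guest. A minimal sketch of the expected call order on the boot core, assuming one VM with a single vCPU: boot_guest, the GUEST_ENTRY/GUEST_DTB values and the way vm/vcpu are obtained are illustrative and not taken from apps/hv, and the import paths follow the axruntime re-exports (an app would normally reach the same helpers through libax::hv).

// Illustrative only: constructing the VM/VCpu (PerCpu setup, VcpusArray, guest
// page table) is outside this patch, so they are taken ready-made here; the
// type parameters follow the VM_ARRAY definition in vm_array.rs.
use axruntime::{
    add_vm, add_vm_vcpu, init_vm_vcpu, init_vm_emu_device,
    init_vm_passthrough_device, run_vm_vcpu, GuestPageTable, HyperCraftHalImpl,
};
use hypercraft::{VCpu, VM};

const VM_ID: usize = 0;
const GUEST_ENTRY: usize = 0x7020_0000; // assumed guest kernel entry point
const GUEST_DTB: usize = 0x7000_0000;   // assumed DTB load address, passed in x0

fn boot_guest(
    vm: VM<HyperCraftHalImpl, GuestPageTable>,
    vcpu: VCpu<HyperCraftHalImpl>,
) -> ! {
    add_vm(VM_ID, vm);                              // publish the VM in VM_ARRAY
    add_vm_vcpu(VM_ID, vcpu);                       // attach the boot vCPU
    init_vm_vcpu(VM_ID, 0, GUEST_ENTRY, GUEST_DTB); // set entry pc and x0 for vCPU 0
    init_vm_emu_device(VM_ID);                      // emulated GICD at IPA 0x8000000
    init_vm_passthrough_device(VM_ID);              // pass-through IRQs (UART, vtimer, ...)
    run_vm_vcpu(VM_ID, 0)                           // diverges: enters the guest
}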