diff --git a/README.md b/README.md
index b16f294..934a0da 100644
--- a/README.md
+++ b/README.md
@@ -15,15 +15,27 @@ We are currently developing MilvusVisor as a research activity to achieve HPC en
 
 Currently, MilvusVisor provides the following functions.
 
+You can enable optional functions at build time with `make custom_all FEATURES=feature1,feature2,...`. (Each `featureN` is the name noted as `Feature Name: feature_name` in the list below.)
+
 - Protecting non-volatile data in devices from guest OS (e.g. Firmware, MAC address)
-  - Supported device: Intel I210, Mellanox Technologies MT27800
-- Protecting MilvusVisor itself against DMA attack
+  - Intel I210 (Feature Name: `i210`)
+    - Protect the EEPROM from write accesses
+  - Mellanox Technologies MT27800 (Feature Name: `mt27800`)
+    - Protect against firmware updates
+- Protecting MilvusVisor itself against DMA attack (Feature Name: `smmu`)
   - Using SMMUv3 Stage 2 Page Translation to protect from DMA attack
-- Fast restore: Fast restoring the guest environments without reboot the machine
+  - Stage 1 translation is still available to the guest OS
+- Fast restore: Fast restoring the guest environments without rebooting the machine (Feature Name: `fast_restore`)
   - Taking a snapshot just before the first boot of the guest OS
   - Restoring it on rebooting/shutting down the guest OS
-- Protecting ACPI Tables from write accesses
+- Protecting ACPI Tables from write accesses (Feature Name: `acpi_table_protection`)
   - For the Fast Restore
+- Linked-List Style Memory Allocator (Feature Name: `advanced_memory_manager`)
+- Contiguous Bit (Feature Name: `contiguous_bit`)
+  - Set the contiguous bit when available (the contiguous bit lets the TLB cache translations more efficiently)
+  - Some machines may not work correctly with the contiguous bit
+- A64FX specific registers' initialization (Feature Name: `a64fx`)
+  - Initialize some A64FX-specific registers during boot
 
 ## Tested machines
 
@@ -31,18 +43,17 @@ We have tested MilvusVisor on the following machines.
 
 - FUJITSU FX700
 - GIGABYTE E252-P30
-- AML-S805X-AC
 - QEMU
 
 The following table shows which feature worked on which machines.
-| Test items \\ Machine | FX700 | E252-P30 | AML | QEMU | -|:------------------------------------------------------------|:-----:|:--------:|:---:|:----:| -| Booting Linux on MilvusVisor (Multi-core) | o | o | o | o | -| Protecting non-volatile data of Intel I210 | o | - | - | - | -| Protecting firmware update of Mellanox Technologies MT27800 | o | - | - | - | -| Protecting MilvusVisor itself against DMA attack | o | - | - | - | -| Fast Restore | o | - | - | - | +| Test items \\ Machine | FX700 | E252-P30 | QEMU | +|:------------------------------------------------------------|:-----:|:--------:|:----:| +| Booting Linux on MilvusVisor (Multi-core) | o | o | o | +| Protecting non-volatile data of Intel I210 | o | - | - | +| Protecting firmware update of Mellanox Technologies MT27800 | o | - | - | +| Protecting MilvusVisor itself against DMA attack | o | - | - | +| Fast Restore | o | - | - | ## How to build the hypervisor @@ -67,7 +78,7 @@ For example, if you want to build the hypervisor only with the device protection make custom_all FEATURES=i210,mt27800 ``` -Next (How to run the hypervisor)[#How to run the hypervisor] +Next [How to run the hypervisor](#how-to-run-the-hypervisor) ### By docker #### Requirements @@ -96,7 +107,7 @@ make QEMU_EFI=/usr/share/qemu-efi/QEMU_EFI.fd run #Please set the path of your Q ### On a physical machine from a USB memory stick #### Requirement - Prepare a USB memory that has an EFI (FAT) partition that has `/EFI/BOOT/` directory. Please confirm that there is no important file in the partition. -- Prepare a physical machine that has ARMv8-A or later, and UEFI firmware. +- Prepare a physical machine that has ARMv8.1-A or later, and UEFI firmware. #### Steps 1. Attach your USB memory stick to the development machine which built the hypervisor binary. 
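The first change below makes `src/Makefile` export two pieces of build metadata that the `common` crate later embeds via `option_env!`. Roughly, the two exported values come from (illustrative outputs; both are empty when the tool is unavailable):

```sh
git rev-parse HEAD 2> /dev/null   # the current commit hash, e.g. an abbreviated 40-hex-digit id
rustc --version 2> /dev/null      # the toolchain banner, e.g. "rustc 1.xx.0-nightly (...)"
```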
diff --git a/src/Makefile b/src/Makefile
index 7e5528e..2427730 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -15,6 +15,8 @@ MKDIR = mkdir -p
 QEMU = qemu-system-aarch64
 RM = rm -rf
 CARGO = cargo
+export PROJECT_HASH := $(shell git rev-parse HEAD 2> /dev/null)
+export RUSTC_VERSION := $(shell rustc --version 2> /dev/null)
 CARGO_BUILD_OPTION = --release
 MOUNT = mount
 UMOUNT = umount
diff --git a/src/common/Cargo.toml b/src/common/Cargo.toml
index 7173c1a..5ffbedc 100644
--- a/src/common/Cargo.toml
+++ b/src/common/Cargo.toml
@@ -6,7 +6,8 @@
 # http://opensource.org/licenses/mit-license.php
 
 [package]
 name = "common"
-version = "0.4.0"
+version = "1.0.0"
 edition = "2021"
 
-[dependencies]
+[features]
+advanced_memory_manager = []
diff --git a/src/common/src/acpi/madt.rs b/src/common/src/acpi/madt.rs
index 9709c44..d6bde98 100644
--- a/src/common/src/acpi/madt.rs
+++ b/src/common/src/acpi/madt.rs
@@ -16,6 +16,7 @@ const MADT_STRUCT_SIZE: usize = core::mem::size_of::<MADT>();
 
 const STRUCT_TYPE_GICC: u8 = 0x0B;
 const STRUCT_TYPE_GICD: u8 = 0x0C;
+const STRUCT_TYPE_ITS: u8 = 0x0F;
 
 const GICC_FLAGS_ENABLED: u32 = 1;
 
@@ -57,16 +58,17 @@ pub struct GicCpuInterfaceStructure {
     spe_overflow_interrupt: u16,
 }
 
-/// MADTのリストから順次GicCpuInterfaceStructureを検出し、MPIDRを返却するIterです
-///
-/// このIteratorはMADTのInterrupt Controller Structure配列からGicCpuInterfaceStructureを先頭から順に
-/// 取得し、その中にあるMPIDRの値を返します。なお該当MPIDRが有効でない([`GICC_FLAGS_ENABLED`]が立ってない)
-/// 場合はスキップします。
+/// An iterator that yields the MPIDR of each enabled GICC entry
+/// (an entry whose [`GICC_FLAGS_ENABLED`] flag is set)
 pub struct GicCpuInterfaceStructureList {
     pointer: usize,
     limit: usize,
 }
 
+pub struct GicInterruptTranslationServiceStructureList {
+    pointer: usize,
+    limit: usize,
+}
+
 impl MADT {
     pub fn get_gic_list(&self) -> GicCpuInterfaceStructureList {
         let length = self.length as usize - MADT_STRUCT_SIZE;
@@ -91,6 +93,16 @@
         }
         return None;
     }
+
+    pub fn get_gic_its_list(&self) -> GicInterruptTranslationServiceStructureList {
+        let length = self.length as usize - MADT_STRUCT_SIZE;
+        let pointer = self as *const _ as usize + MADT_STRUCT_SIZE;
+
+        GicInterruptTranslationServiceStructureList {
+            pointer,
+            limit: pointer + length,
+        }
+    }
 }
 
 impl Iterator for GicCpuInterfaceStructureList {
@@ -118,3 +130,21 @@
         }
     }
 }
+
+impl Iterator for GicInterruptTranslationServiceStructureList {
+    type Item = usize;
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.pointer >= self.limit {
+            return None;
+        }
+        let record_base = self.pointer;
+        let record_type = unsafe { *(record_base as *const u8) };
+        let record_length = unsafe { *((record_base + 1) as *const u8) };
+
+        self.pointer += record_length as usize;
+        match record_type {
+            STRUCT_TYPE_ITS => Some(unsafe { *((record_base + 8) as *const u64) } as usize),
+            _ => self.next(),
+        }
+    }
+}
diff --git a/src/common/src/cpu.rs b/src/common/src/cpu.rs
index 1017eba..3ae65df 100644
--- a/src/common/src/cpu.rs
+++ b/src/common/src/cpu.rs
@@ -16,15 +16,17 @@ use core::arch::asm;
 
 #[derive(Clone)]
 pub struct InterruptFlag(u64);
 
-/* CPU Bit Fields */
+pub const AA64_INSTRUCTION_SIZE: usize = 4;
+
+/* DAIF */
 pub const DAIF_IRQ_BIT: u64 = 7;
 pub const DAIF_FIQ_BIT: u64 = 6;
 
-/* CNTHCTL_EL2 Register */
+/* CNTHCTL_EL2 */
 pub const CNTHCTL_EL2_EL1PCEN: u64 = 1 << 1;
 pub const CNTHCTL_EL2_EL1PCTEN: u64 = 1 << 0;
 
-/* CPACR_EL1 Register */
+/* CPACR_EL1 */
 pub const CPACR_EL1_TTA_BIT_OFFSET: u64 = 28;
 //pub const CPACR_EL1_TTA: u64 = 1 << CPACR_EL1_TTA_BIT_OFFSET;
 pub const CPACR_EL1_FPEN_BITS_OFFSET: u64 = 20;
@@ -32,7
+34,7 @@ pub const CPACR_EL1_FPEN_BITS_OFFSET: u64 = 20;
 pub const CPACR_EL1_ZEN_BITS_OFFSET: u64 = 16;
 //pub const CPACR_EL1_ZEN: u64 = 0b11 << CPACR_EL1_ZEN_BITS_OFFSET;
 
-/* CPTR_EL2 Register */
+/* CPTR_EL2 */
 pub const CPTR_EL2_TTA_BIT_OFFSET_WITH_E2H: u64 = 28;
 pub const CPTR_EL2_TTA_WITH_E2H: u64 = 1 << CPTR_EL2_TTA_BIT_OFFSET_WITH_E2H;
 pub const CPTR_EL2_TTA_BIT_OFFSET_WITHOUT_E2H: u64 = 20;
@@ -45,7 +47,7 @@ pub const CPTR_EL2_ZEN: u64 = 0b11 << CPTR_EL2_ZEN_BITS_OFFSET;
 pub const CPTR_EL2_ZEN_NO_TRAP: u64 = 0b11 << CPTR_EL2_ZEN_BITS_OFFSET;
 //pub const CPTR_EL2_RES1: u64 = 0b11111111 | (1 << 9) | (0b11 << 12);
 
-/* TCR_EL2 Register */
+/* TCR_EL2 */
 pub const TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H: u64 = 32;
 pub const TCR_EL2_DS_WITHOUT_E2H: u64 = 1 << TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H;
 pub const TCR_EL2_TCMA_BIT_OFFSET_WITHOUT_E2H: u64 = 30;
@@ -69,7 +71,7 @@ pub const TCR_EL2_TG0_WITHOUT_E2H: u64 = 0b11 << TCR_EL2_TG0_BITS_OFFSET_WITHOUT
 pub const TCR_EL2_T0SZ_BITS_OFFSET_WITHOUT_E2H: u64 = 0;
 pub const TCR_EL2_T0SZ_WITHOUT_E2H: u64 = 0b111111 << TCR_EL2_T0SZ_BITS_OFFSET_WITHOUT_E2H;
 
-/* TCR_EL1 Register */
+/* TCR_EL1 */
 pub const TCR_EL1_DS_BIT_OFFSET: u64 = 59;
 //pub const TCR_EL1_DS: u64 = 1 << TCR_EL1_DS_BIT_OFFSET;
 pub const TCR_EL1_TCMA0_BIT_OFFSET: u64 = 57;
@@ -90,7 +92,7 @@ pub const TCR_EL1_IPS_BITS_OFFSET: u64 = 32;
 //pub const TCR_EL1_IPS: u64 = 0b111 << TCR_EL1_IPS_BITS_OFFSET;
 pub const TCR_EL1_EPD1: u64 = 1 << 23;
 
-/* HCR_EL2 Register */
+/* HCR_EL2 */
 pub const HCR_EL2_FIEN: u64 = 1 << 47;
 pub const HCR_EL2_API: u64 = 1 << 41;
 pub const HCR_EL2_APK: u64 = 1 << 40;
@@ -100,7 +102,7 @@ pub const HCR_EL2_RW: u64 = 1 << 31;
 pub const HCR_EL2_TSC: u64 = 1 << 19;
 pub const HCR_EL2_VM: u64 = 1 << 0;
 
-/* VTCR_EL2 Register */
+/* VTCR_EL2 */
 pub const VTCR_EL2_SL2_BIT_OFFSET: u64 = 33;
 pub const VTCR_EL2_SL2: u64 = 1 << VTCR_EL2_SL2_BIT_OFFSET;
 pub const VTCR_EL2_RES1: u64 = 1 << 31;
@@ -108,6 +110,7 @@ pub const VTCR_EL2_HWU_BITS_OFFSET: u64 = 25;
 pub const VTCR_EL2_PS_BITS_OFFSET: u64 = 16;
 pub const VTCR_EL2_PS: u64 = 0b111 << VTCR_EL2_PS_BITS_OFFSET;
 pub const VTCR_EL2_TG0_BITS_OFFSET: u64 = 14;
+pub const VTCR_EL2_TG0: u64 = 0b11 << VTCR_EL2_TG0_BITS_OFFSET;
 pub const VTCR_EL2_SH0_BITS_OFFSET: u64 = 12;
 pub const VTCR_EL2_ORG0_BITS_OFFSET: u64 = 10;
 pub const VTCR_EL2_IRG0_BITS_OFFSET: u64 = 8;
@@ -116,6 +119,10 @@ pub const VTCR_EL2_SL0: u64 = 0b11 << VTCR_EL2_SL0_BITS_OFFSET;
 pub const VTCR_EL2_T0SZ_BITS_OFFSET: u64 = 0;
 pub const VTCR_EL2_T0SZ: u64 = 0b111111 << VTCR_EL2_T0SZ_BITS_OFFSET;
 
+/* SPSR_EL2 */
+pub const SPSR_EL2_M: u64 = 0b1111;
+pub const SPSR_EL2_M_EL0T: u64 = 0b0000;
+
 /* ID_AA64PFR0_EL1 */
 pub const ID_AA64PFR0_EL1_SVE: u64 = 0b1111 << 32;
 pub const ID_AA64PFR0_EL1_GIC: u64 = 0b1111 << 24;
@@ -126,9 +133,7 @@ pub const ID_AA64MMFR0_EL1_PARANGE: u64 = 0b1111;
 /* ZCR_EL2 */
 pub const MAX_ZCR_EL2_LEN: u64 = 0x1ff;
 
-/// SMC Calling Convention 1.2に沿ったSMCを発行
-///
-/// 指定したレジスタの値をセットした状況でSMC #0を発行します。
+/// Issue SMC #0 following SMC Calling Convention 1.2
 pub fn secure_monitor_call(
     x0: &mut u64,
     x1: &mut u64,
@@ -248,6 +253,25 @@ pub fn set_vtcr_el2(vtcr_el2: u64) {
     unsafe { asm!("msr vtcr_el2, {:x}", in(reg) vtcr_el2) };
 }
 
+#[inline(always)]
+pub fn get_hcr_el2() -> u64 {
+    let hcr_el2: u64;
+    unsafe { asm!("mrs {:x}, hcr_el2", out(reg) hcr_el2) };
+    return hcr_el2;
+}
+
+#[inline(always)]
+pub fn set_hcr_el2(hcr_el2: u64) {
+    unsafe { asm!("msr hcr_el2, {:x}", in(reg) hcr_el2) };
+}
+
+#[inline(always)]
+pub fn get_current_el() -> u64 {
+    let current_el: u64;
+ unsafe { asm!("mrs {:x}, currentel", out(reg) current_el) }; + return current_el; +} + #[inline(always)] pub fn set_icc_sgi1r_el1(icc_sgi1r_el1: u64) { unsafe { asm!("msr icc_sgi1r_el1, {:x}", in(reg) icc_sgi1r_el1) }; @@ -342,6 +366,35 @@ pub fn set_mair_el1(mair_el1: u64) { unsafe { asm!("msr mair_el1, {:x}", in(reg) mair_el1) }; } +#[inline(always)] +pub fn get_cnthctl_el2() -> u64 { + let cnthctl_el2: u64; + unsafe { asm!("mrs {:x}, cnthctl_el2", out(reg) cnthctl_el2) }; + return cnthctl_el2; +} + +#[inline(always)] +pub fn set_cnthctl_el2(cnthctl_el2: u64) { + unsafe { asm!("msr cnthctl_el2, {:x}", in(reg) cnthctl_el2) }; +} + +#[inline(always)] +pub fn set_cntvoff_el2(cntvoff_el2: u64) { + unsafe { asm!("msr cntvoff_el2, {:x}", in(reg) cntvoff_el2) }; +} + +#[inline(always)] +pub fn get_cptr_el2() -> u64 { + let cptr_el2: u64; + unsafe { asm!("mrs {:x}, cptr_el2", out(reg) cptr_el2) }; + return cptr_el2; +} + +#[inline(always)] +pub fn set_cptr_el2(cptr_el2: u64) { + unsafe { asm!("msr cptr_el2, {:x}", in(reg) cptr_el2) }; +} + #[inline(always)] pub fn get_cpacr_el1() -> u64 { let cpacr_el1: u64; @@ -354,6 +407,18 @@ pub fn set_cpacr_el1(cpacr_el1: u64) { unsafe { asm!("msr cpacr_el1, {:x}", in(reg) cpacr_el1) }; } +#[inline(always)] +pub fn get_sctlr_el2() -> u64 { + let sctlr_el2: u64; + unsafe { asm!("mrs {:x}, sctlr_el2", out(reg) sctlr_el2) }; + return sctlr_el2; +} + +#[inline(always)] +pub fn set_sctlr_el2(sctlr_el2: u64) { + unsafe { asm!("msr sctlr_el2, {:x}", in(reg) sctlr_el2) }; +} + #[inline(always)] pub fn get_sctlr_el1() -> u64 { let sctlr_el1: u64; @@ -366,6 +431,18 @@ pub fn set_sctlr_el1(sctlr_el1: u64) { unsafe { asm!("msr sctlr_el1, {:x}", in(reg) sctlr_el1) }; } +#[inline(always)] +pub fn get_vbar_el2() -> u64 { + let vbar_el2: u64; + unsafe { asm!("mrs {:x}, vbar_el2", out(reg) vbar_el2) }; + return vbar_el2; +} + +#[inline(always)] +pub fn set_vbar_el2(vbar_el2: u64) { + unsafe { asm!("msr vbar_el2, {:x}", in(reg) vbar_el2) }; +} + #[inline(always)] pub fn get_vbar_el1() -> u64 { let vbar_el1: u64; @@ -378,6 +455,27 @@ pub fn set_vbar_el1(vbar_el1: u64) { unsafe { asm!("msr vbar_el1, {:x}", in(reg) vbar_el1) }; } +#[inline(always)] +pub fn get_esr_el2() -> u64 { + let esr_el2: u64; + unsafe { asm!("mrs {:x}, esr_el2", out(reg) esr_el2) }; + return esr_el2; +} + +#[inline(always)] +pub fn get_far_el2() -> u64 { + let far_el2: u64; + unsafe { asm!("mrs {:x}, far_el2", out(reg) far_el2) }; + return far_el2; +} + +#[inline(always)] +pub fn get_hpfar_el2() -> u64 { + let hpfar_el2: u64; + unsafe { asm!("mrs {:x}, hpfar_el2", out(reg) hpfar_el2) }; + return hpfar_el2; +} + #[inline(always)] pub fn get_spsr_el2() -> u64 { let spsr_el2: u64; @@ -402,6 +500,13 @@ pub fn set_elr_el2(elr_el2: u64) { unsafe { asm!("msr elr_el2, {:x}", in(reg) elr_el2) }; } +#[inline(always)] +pub fn get_sp() -> u64 { + let sp: u64; + unsafe { asm!("mov {:x}, sp", out(reg) sp) }; + return sp; +} + #[inline(always)] pub fn get_sp_el1() -> u64 { let sp_el1: u64; @@ -421,6 +526,13 @@ pub fn get_id_aa64mmfr0_el1() -> u64 { return id_aa64mmfr0_el1; } +#[inline(always)] +pub fn get_id_aa64pfr0_el1() -> u64 { + let id_aa64pfr0_el1: u64; + unsafe { asm!("mrs {:x}, id_aa64pfr0_el1", out(reg) id_aa64pfr0_el1) }; + return id_aa64pfr0_el1; +} + #[inline(always)] pub fn get_mpidr_el1() -> u64 { let mpidr_el1: u64; @@ -428,6 +540,11 @@ pub fn get_mpidr_el1() -> u64 { return mpidr_el1; } +#[inline(always)] +pub fn advance_elr_el2() { + set_elr_el2(get_elr_el2() + AA64_INSTRUCTION_SIZE as u64); +} + 
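+// Illustrative sketch (not code from this patch): a trap handler in
+// hypervisor_kernel would typically combine the syndrome-register accessors
+// added above with advance_elr_el2() to emulate a trapped access and then
+// step over the faulting 4-byte instruction:
+//
+//     let esr = get_esr_el2();         /* why the guest trapped */
+//     let fault_ipa = get_hpfar_el2(); /* which IPA page faulted */
+//     /* ... emulate the MMIO access here ... */
+//     advance_elr_el2();               /* resume the guest after the instruction */
+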
 #[inline(always)]
 pub fn flush_tlb_el2() {
     unsafe {
@@ -454,6 +571,16 @@ pub fn flush_tlb_el1() {
     };
 }
 
+#[inline(always)]
+pub fn dsb() {
+    unsafe { asm!("dsb sy") }
+}
+
+#[inline(always)]
+pub fn isb() {
+    unsafe { asm!("isb") }
+}
+
 #[inline(always)]
 pub fn flush_tlb_ipa_is(address: u64) {
     unsafe { asm!("TLBI IPAS2E1IS, {:x}", in(reg) address) };
@@ -474,44 +601,40 @@ pub fn send_event_all() {
     unsafe { asm!("SEV") };
 }
 
-/// 現時点の割り込み状況を保存し、IRQ/FIQを禁止
+/// Save the current interrupt status and disable IRQ/FIQ
 ///
-/// # Return Value
-/// 保存された割り込み状況、 [`local_irq_fiq_restore`]の引数として使用
+/// # Result
+/// The saved interrupt status; pass it to [`local_irq_fiq_restore`] to restore it
 pub fn local_irq_fiq_save() -> InterruptFlag {
     let mut daif: u64;
     unsafe { asm!("mrs {:x}, DAIF", out(reg) daif) };
     let flag = InterruptFlag(daif);
     daif |= (1 << DAIF_IRQ_BIT) | (1 << DAIF_FIQ_BIT);
-    unsafe {
-        asm!("  dsb ish
-                isb
-                msr DAIF, {:x}", in(reg) daif)
-    };
+    dsb();
+    isb();
+    unsafe { asm!("msr DAIF, {:x}", in(reg) daif) };
     flag
 }
 
-/// 割り込み状況を復元
+/// Restore the saved interrupt status
 ///
 /// # Arguments
-/// * f: 保存された割り込み状況、 [`local_irq_fiq_save`]の戻り値
+/// * f - InterruptFlag, the return value of [`local_irq_fiq_save`]
 pub fn local_irq_fiq_restore(f: InterruptFlag) {
-    unsafe {
-        asm!("  dsb ish
-                isb
-                msr DAIF, {:x}", in(reg) f.0)
-    };
+    dsb();
+    isb();
+    unsafe { asm!("msr DAIF, {:x}", in(reg) f.0) };
 }
 
-/// 渡された仮想アドレスをEL2での仮想アドレスと解釈し、物理アドレスに変換
+/// Convert a virtual address of EL2 to a physical address (for read access)
 ///
-/// AT S1E2Rを使用して、物理アドレスに変換します。マップされてない場合などはErrを返します。
+/// This function uses the AT S1E2R instruction.
 ///
 /// # Arguments
-/// virtual_address: 変換を行う仮想アドレス
+/// * virtual_address - the virtual address to convert
 ///
-/// # Return Value
-/// 変換に成功した場合はOk(physical_address)、失敗した場合はErr(())
+/// # Result
+/// If succeeded, returns Ok(physical_address), otherwise (the address is not accessible) returns Err(())
 pub fn convert_virtual_address_to_physical_address_el2_read(
     virtual_address: usize,
 ) -> Result<usize, ()> {
@@ -532,15 +655,15 @@
     }
 }
 
-/// 渡された仮想アドレスをEL2での仮想アドレスと解釈し、物理アドレスに変換
+/// Convert a virtual address of EL2 to a physical address (for write access)
 ///
-/// AT S1E2Wを使用して、物理アドレスに変換します。マップされてない場合などはErrを返します。
+/// This function uses the AT S1E2W instruction.
 ///
 /// # Arguments
-/// virtual_address: 変換を行う仮想アドレス
+/// * virtual_address - the virtual address to convert
 ///
-/// # Return Value
-/// 変換に成功した場合はOk(physical_address)、失敗した場合はErr(())
+/// # Result
+/// If succeeded, returns Ok(physical_address), otherwise (the address is not accessible) returns Err(())
 pub fn convert_virtual_address_to_physical_address_el2_write(
     virtual_address: usize,
 ) -> Result<usize, ()> {
@@ -561,15 +684,46 @@
     }
 }
 
-/// 渡された仮想アドレスをEL1での仮想アドレスと解釈し、中間物理アドレス(IPA)に変換
+/// Convert a virtual address of EL0 to an intermediate physical address (for read access)
+///
+/// This function uses the AT S1E0R instruction.
+///
+/// # Arguments
+/// * virtual_address - **the virtual address of EL0** to convert
+///
+/// # Result
+/// If succeeded, returns Ok(intermediate_physical_address),
+/// otherwise (the address is not accessible) returns Err(())
+pub fn convert_virtual_address_to_intermediate_physical_address_el0_read(
+    virtual_address: usize,
+) -> Result<usize, ()> {
+    let aligned_virtual_address = virtual_address & PAGE_MASK;
+    let offset = virtual_address & !PAGE_MASK;
+    let aligned_physical_address: usize;
+    unsafe {
+        asm!("  at S1E0R, {:x}
+                mrs {:x}, par_el1",
+                in(reg) (aligned_virtual_address),
+                out(reg) aligned_physical_address)
+    };
+
+    if (aligned_physical_address & 1) == 0 {
+        Ok((aligned_physical_address & bitmask!(51, PAGE_SHIFT)) + offset)
+    } else {
+        Err(())
+    }
+}
+
+/// Convert a virtual address of EL1 to an intermediate physical address (for read access)
 ///
-/// AT S1E1Rを使用して、中間物理アドレスに変換します。マップされてない場合などはErrを返します。
+/// This function uses the AT S1E1R instruction.
 ///
 /// # Arguments
-/// virtual_address: 変換を行う仮想アドレス
+/// * virtual_address - **the virtual address of EL1** to convert
 ///
-/// # Return Value
-/// 変換に成功した場合はOk(physical_address)、失敗した場合はErr(())
+/// # Result
+/// If succeeded, returns Ok(intermediate_physical_address),
+/// otherwise (the address is not accessible) returns Err(())
 pub fn convert_virtual_address_to_intermediate_physical_address_el1_read(
     virtual_address: usize,
 ) -> Result<usize, ()> {
@@ -590,15 +744,16 @@
     }
 }
 
-/// 渡された仮想アドレスをEL1での仮想アドレスと解釈し、中間物理アドレス(IPA)に変換
+/// Convert a virtual address of EL1 to an intermediate physical address (for write access)
 ///
-/// AT S1E1Wを使用して、中間物理アドレスに変換します。マップされてない場合などはErrを返します。
+/// This function uses the AT S1E1W instruction.
 ///
 /// # Arguments
-/// virtual_address: 変換を行う仮想アドレス
+/// * `virtual_address` - **the virtual address of EL1** to convert
 ///
-/// # Return Value
-/// 変換に成功した場合はOk(physical_address)、失敗した場合はErr(())
+/// # Result
+/// If succeeded, returns Ok(intermediate_physical_address),
+/// otherwise (the address is not accessible) returns Err(())
 pub fn convert_virtual_address_to_intermediate_physical_address_el1_write(
     virtual_address: usize,
 ) -> Result<usize, ()> {
@@ -621,8 +776,8 @@
 
 /// Halt Loop
 ///
-/// CPUを待機状態にさせ停止させる
-/// マルチコア制御には未対応
+/// Stop the calling CPU with an endless WFI loop.
+/// This function stops only the current core; it cannot stop the other cores.
 pub fn halt_loop() -> !
 {
     loop {
         unsafe { asm!("wfi") };
diff --git a/src/common/src/lib.rs b/src/common/src/lib.rs
index b2069d3..5df310e 100644
--- a/src/common/src/lib.rs
+++ b/src/common/src/lib.rs
@@ -9,21 +9,36 @@
 
 pub mod acpi;
 pub mod cpu;
+#[cfg(feature = "advanced_memory_manager")]
+pub mod memory_allocator;
 pub mod paging;
 pub mod serial_port;
 pub mod smmu;
+pub mod spin_flag;
+#[cfg(not(feature = "advanced_memory_manager"))]
+pub mod stack_memory_allocator;
 
-use crate::serial_port::SerialPortInfo;
+#[cfg(feature = "advanced_memory_manager")]
+pub use memory_allocator::MemoryAllocator;
+
+#[cfg(not(feature = "advanced_memory_manager"))]
+pub use stack_memory_allocator::MemoryAllocator;
 
-use core::mem::MaybeUninit;
+use crate::serial_port::SerialPortInfo;
 
-/// 読み込むハイパーバイザー本体の位置
+/// The name of this hypervisor
+pub const HYPERVISOR_NAME: &'static str = "MilvusVisor";
+/// The VCS commit hash taken from the `PROJECT_HASH` environment variable (exported by src/Makefile)
+pub const HYPERVISOR_HASH_INFO: Option<&'static str> =
+    if let Some(s) = option_env!("PROJECT_HASH") && s.len() != 0 { Some(s) } else { None };
+/// The compiler information taken from the `RUSTC_VERSION` environment variable
+pub const COMPILER_INFO: Option<&'static str> =
+    if let Some(s) = option_env!("RUSTC_VERSION") && s.len() != 0 { Some(s) } else { None };
+/// The path of hypervisor_kernel
 pub const HYPERVISOR_PATH: &'static str = "\\EFI\\BOOT\\hypervisor_kernel";
-/// ハイパーバイザーをマップするアドレス
+/// The virtual address to map hypervisor_kernel (same as hypervisor_kernel/config/linkerscript.ld)
 pub const HYPERVISOR_VIRTUAL_BASE_ADDRESS: usize = 0x7FC0000000;
-/// シリアルポートのI/Oアドレスをマップするアドレス
+/// The virtual address of serial port MMIO
 pub const HYPERVISOR_SERIAL_BASE_ADDRESS: usize = 0x7FD0000000;
-/// 起動時に確保するメモリ量
+/// The memory size to allocate at boot
 pub const ALLOC_SIZE: usize = 256 * 1024 * 1024; /* 256 MB */
 pub const MAX_PHYSICAL_ADDRESS: usize = (1 << (52 + 1)) - 1; /* Armv8.2-A */
 //pub const MAX_PHYSICAL_ADDRESS: usize = (1 << (48 + 1)) - 1;/* Armv8.0 */
@@ -33,7 +48,7 @@ pub const PAGE_MASK: usize = !(PAGE_SIZE - 1);
 pub const STAGE_2_PAGE_SHIFT: usize = 12;
 pub const STAGE_2_PAGE_SIZE: usize = 1 << STAGE_2_PAGE_SHIFT;
 pub const STAGE_2_PAGE_MASK: usize = !(STAGE_2_PAGE_SIZE - 1);
-/// 各CPUに割り当てるスタックのページ数
+/// The number of stack pages to assign to each CPU
 /// STACK_SIZE = STACK_PAGES << PAGE_SHIFT = STACK_PAGES * PAGE_SIZE
 pub const STACK_PAGES: usize = 16;
 
@@ -58,6 +73,15 @@ pub struct MemorySaveListEntry {
     pub saved_address: usize,
     pub num_of_pages: u64,
 }
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum MemoryAllocationError {
+    AddressNotAvailable,
+    InvalidSize,
+    InvalidAddress,
+    EntryPoolRunOut,
+}
+
 /// if [`MemorySaveListEntry::saved_address`] was this value, it indicates the entry is ondemand save area.
 pub const MEMORY_SAVE_ADDRESS_ONDEMAND_FLAG: usize = usize::MAX;
 
@@ -65,7 +89,10 @@ pub struct SystemInformation {
     pub vbar_el2: u64,
     pub acpi_rsdp_address: Option<usize>,
-    pub memory_pool: &'static ([MaybeUninit<usize>; ALLOC_SIZE / PAGE_SIZE], usize),
+    pub available_memory_info: (
+        usize, /* base_address */
+        usize, /* number of pages */
+    ),
     pub memory_save_list: *mut [MemorySaveListEntry],
     pub serial_port: Option<SerialPortInfo>,
     pub ecam_info: Option<EcamInfo>,
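The two `option_env!` constants above consume the metadata exported by `src/Makefile`. A minimal sketch of how a boot banner might use them (the `println!`-style macro and the free function are illustrative, not part of this patch):

```rust
// Hypothetical banner code in the bootloader; assumes a println! macro exists there.
fn print_version_banner() {
    println!(
        "{} (commit: {}, built with: {})",
        common::HYPERVISOR_NAME,
        common::HYPERVISOR_HASH_INFO.unwrap_or("unknown"),
        common::COMPILER_INFO.unwrap_or("unknown"),
    );
}
```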
diff --git a/src/common/src/memory_allocator.rs b/src/common/src/memory_allocator.rs
new file mode 100644
index 0000000..fbf8815
--- /dev/null
+++ b/src/common/src/memory_allocator.rs
@@ -0,0 +1,656 @@
+// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST)
+// All rights reserved.
+//
+// This software is released under the MIT License.
+// http://opensource.org/licenses/mit-license.php
+
+//!
+//! Memory Allocator
+//!
+
+use crate::{MemoryAllocationError, PAGE_SHIFT};
+
+/// Advanced Memory Allocator
+///
+/// The memory allocator which can allocate/free memory with alignment.
+///
+/// If you modify the members, please adjust [`Self::init`]
+pub struct MemoryAllocator {
+    free_memory_size: usize,
+    first_entry: *mut MemoryEntry,
+    free_list: [Option<*mut MemoryEntry>; Self::NUM_OF_FREE_LIST],
+    memory_entry_pool: [MemoryEntry; Self::NUM_OF_POOL_ENTRIES],
+}
+
+struct MemoryEntry {
+    /* Contains free memory area */
+    previous: Option<*mut Self>,
+    next: Option<*mut Self>,
+    list_prev: Option<*mut Self>,
+    list_next: Option<*mut Self>,
+    start: usize,
+    end: usize,
+    enabled: bool,
+}
+
+/// ATTENTION: this iterator follows the free_list links (not the normal `next` pointers)
+struct FreeListIterMut {
+    entry: Option<*mut MemoryEntry>,
+}
+
+impl MemoryAllocator {
+    const NUM_OF_POOL_ENTRIES: usize = 64;
+    const NUM_OF_FREE_LIST: usize = 12;
+
+    /// Set up `self` in place with the allocated memory region
+    ///
+    /// All members of `Self` may be uninitialized when this is called.
+    /// Please be careful when you assign a value into a member which has a drop trait.
+    /// (Use `core::mem::forget(core::mem::replace(&mut self.member, new_value))`)
+    /// This function is used to reduce the stack usage.
+    /// (`aarch64-unknown-uefi` does not have __chkstk)
+    ///
+    /// # Panics
+    /// If [`Self::free`] fails, this function panics.
+    ///
+    /// # Arguments
+    /// * `self` - mutable reference of Self, it may be uninitialized
+    /// * `allocated_address` - the base address allocated
+    /// * `allocated_size` - the allocated size
+    pub fn init(&mut self, allocated_address: usize, allocated_size: usize) {
+        use core::mem::{forget, replace};
+
+        /* Initialize members */
+        self.free_memory_size = 0;
+        self.first_entry = core::ptr::null_mut();
+        forget(replace(&mut self.free_list, [None; Self::NUM_OF_FREE_LIST]));
+
+        for e in &mut self.memory_entry_pool {
+            forget(replace(
+                e,
+                MemoryEntry {
+                    previous: None,
+                    next: None,
+                    list_prev: None,
+                    list_next: None,
+                    start: 0,
+                    end: 0,
+                    enabled: false,
+                },
+            ));
+        }
+
+        self.free(allocated_address, allocated_size)
+            .expect("Failed to init memory");
+    }
+
+    /*
+    /// Setup MemoryAllocator with allocated address, and return Self
+    pub fn create(allocated_address: usize, allocated_size: usize) -> Self {
+        use core::mem::MaybeUninit;
+
+        let mut pool: [MaybeUninit<MemoryEntry>; Self::NUM_OF_POOL_ENTRIES] =
+            MaybeUninit::uninit_array();
+        for e in &mut pool {
+            e.write(MemoryEntry {
+                previous: None,
+                next: None,
+                list_prev: None,
+                list_next: None,
+                start: 0,
+                end: 0,
+                enabled: false,
+            });
+        }
+
+        let mut s = Self {
+            free_memory_size: 0,
+            first_entry: core::ptr::null_mut(),
+            free_list: [None; Self::NUM_OF_FREE_LIST],
+            memory_entry_pool: unsafe { MaybeUninit::array_assume_init(pool) },
+        };
+        s.free(allocated_address, allocated_size)
+            .expect("Failed to init memory");
+
+        return s;
+    }
+    */
+
+    fn create_memory_entry(&mut self) -> Result<&'static mut MemoryEntry, MemoryAllocationError> {
+        for e in &mut self.memory_entry_pool {
+            if !e.is_enabled() {
+                e.set_enabled();
+                e.init();
+                return Ok(unsafe { &mut *(e as *mut _ as usize as *mut MemoryEntry) });
+            }
+        }
+        Err(MemoryAllocationError::EntryPoolRunOut)
+    }
+
+    fn search_entry_containing_address_mut(
+        &mut self,
+        address: usize,
+    ) -> Option<&'static mut MemoryEntry> {
+        let mut entry = unsafe { &mut *self.first_entry };
+        while entry.get_start_address()
< address && entry.get_end_address() < address { + if let Some(t) = entry.get_next_entry() { + entry = t; + } else { + return None; + } + } + if address >= entry.get_start_address() && address <= entry.get_end_address() { + return Some(entry); + } + None + } + + fn search_entry_previous_address_mut( + &mut self, + address: usize, + ) -> Option<&'static mut MemoryEntry> { + let mut entry = unsafe { &mut *self.first_entry }; + while entry.get_start_address() < address { + if let Some(t) = entry.get_next_entry() { + entry = t; + } else { + return if entry.get_end_address() <= address { + Some(entry) + } else { + entry.get_prev_entry() + }; + } + } + entry.get_prev_entry() + } + + fn define_used_memory( + &mut self, + start_address: usize, + size: usize, + align_order: usize, + target_entry: &mut Option<&mut MemoryEntry>, + ) -> Result<(), MemoryAllocationError> { + if size == 0 || self.free_memory_size < size { + return Err(MemoryAllocationError::InvalidSize); + } + if align_order != 0 { + let (aligned_start_address, aligned_size) = + Self::align_address_and_size(start_address, size, align_order); + return self.define_used_memory(aligned_start_address, aligned_size, 0, target_entry); + } + let entry = if let Some(t) = target_entry { + assert!(t.get_start_address() <= start_address); + assert!(t.get_end_address() >= Self::size_to_end_address(size, start_address)); + t + } else if let Some(t) = self.search_entry_containing_address_mut(start_address) { + t + } else { + return Err(MemoryAllocationError::InvalidAddress); + }; + + if entry.get_start_address() == start_address { + if entry.get_end_address() == Self::size_to_end_address(size, start_address) { + /* Delete the entry */ + if entry.is_first_entry() { + if let Some(next) = entry.get_next_entry() { + self.first_entry = next as *mut _; + } else { + return Err(MemoryAllocationError::AddressNotAvailable); + } + } + self.unchain_entry_from_free_list(entry); + entry.delete(); + if target_entry.is_some() { + *target_entry = None; + } + } else { + let old_size = entry.get_size(); + entry.set_range(start_address + size, entry.get_end_address()); + self.chain_entry_to_free_list(entry, Some(old_size)); + } + } else if entry.get_end_address() == start_address { + if size != 1 { + return Err(MemoryAllocationError::InvalidAddress); + } + /* Allocate 1 byte of end_address */ + entry.set_range(entry.get_start_address(), start_address - 1); + self.chain_entry_to_free_list(entry, Some(entry.get_size() + size)); + } else if entry.get_end_address() == Self::size_to_end_address(size, start_address) { + let old_size = entry.get_size(); + entry.set_range(entry.get_start_address(), start_address - 1); + self.chain_entry_to_free_list(entry, Some(old_size)); + } else { + let new_entry = self.create_memory_entry()?; + let old_size = entry.get_size(); + new_entry.set_range(start_address + size, entry.get_end_address()); + entry.set_range(entry.get_start_address(), start_address - 1); + if let Some(next) = entry.get_next_entry() { + new_entry.chain_after_me(next); + } + entry.chain_after_me(new_entry); + self.chain_entry_to_free_list(entry, Some(old_size)); + self.chain_entry_to_free_list(new_entry, None); + } + self.free_memory_size -= size; + return Ok(()); + } + + fn define_free_memory( + &mut self, + start_address: usize, + size: usize, + ) -> Result<(), MemoryAllocationError> { + if size == 0 { + return Err(MemoryAllocationError::InvalidSize); + } + let entry = self + .search_entry_previous_address_mut(start_address) + .unwrap_or(unsafe { &mut *self.first_entry 
}); + let end_address = Self::size_to_end_address(size, start_address); + + if entry.get_start_address() <= start_address && entry.get_end_address() >= end_address { + /* already freed */ + return Err(MemoryAllocationError::InvalidAddress); + } else if entry.get_end_address() >= start_address && !entry.is_first_entry() { + /* Free duplicated area */ + return self.define_free_memory( + entry.get_end_address() + 1, + Self::size_from_address(entry.get_end_address() + 1, end_address), + ); + } else if entry.get_end_address() == end_address { + /* Free duplicated area */ + /* entry may be first entry */ + return self.define_free_memory(start_address, size - entry.get_size()); + } + + let mut processed = false; + let old_size = entry.get_size(); + let address_after_entry = entry.get_end_address() + 1; + + if address_after_entry == start_address { + entry.set_range(entry.get_start_address(), end_address); + processed = true; + } + + if entry.is_first_entry() && entry.get_start_address() == end_address + 1 { + entry.set_range(start_address, entry.get_end_address()); + processed = true; + } + + if let Some(next) = entry.get_next_entry() { + if next.get_start_address() <= start_address { + assert!(!processed); + return if next.get_end_address() >= end_address { + Err(MemoryAllocationError::InvalidAddress) /* already freed */ + } else { + self.define_free_memory( + next.get_end_address() + 1, + end_address - next.get_end_address(), + ) + }; + } + if next.get_start_address() == end_address + 1 { + let next_old_size = next.get_size(); + next.set_range(start_address, next.get_end_address()); + self.chain_entry_to_free_list(next, Some(next_old_size)); + processed = true; + } + + if (next.get_start_address() == entry.get_end_address() + 1) + || (processed && address_after_entry >= next.get_start_address()) + { + entry.set_range( + entry.get_start_address(), + entry.get_end_address().max(next.get_end_address()), + ); + + self.unchain_entry_from_free_list(next); + next.delete(); + } + if processed { + self.free_memory_size += size; + self.chain_entry_to_free_list(entry, Some(old_size)); + return Ok(()); + } + let new_entry = self.create_memory_entry()?; + new_entry.set_range(start_address, end_address); + if new_entry.get_end_address() < entry.get_start_address() { + if let Some(prev_entry) = entry.get_prev_entry() { + assert!(prev_entry.get_end_address() < new_entry.get_start_address()); + prev_entry.chain_after_me(new_entry); + new_entry.chain_after_me(entry); + } else { + self.first_entry = new_entry as *mut _; + new_entry.chain_after_me(entry); + } + } else { + next.set_prev_entry(new_entry); + new_entry.set_next_entry(next); + entry.chain_after_me(new_entry); + } + self.free_memory_size += size; + self.chain_entry_to_free_list(entry, Some(old_size)); + self.chain_entry_to_free_list(new_entry, None); + Ok(()) + } else { + if processed { + self.free_memory_size += size; + self.chain_entry_to_free_list(entry, Some(old_size)); + return Ok(()); + } + let new_entry = self.create_memory_entry()?; + new_entry.set_range(start_address, end_address); + if entry.get_end_address() < new_entry.get_start_address() { + entry.chain_after_me(new_entry); + } else { + if let Some(prev_entry) = entry.get_prev_entry() { + assert!(prev_entry.get_end_address() < entry.get_start_address()); + prev_entry.chain_after_me(new_entry); + } else { + self.first_entry = new_entry as *mut _; + } + new_entry.chain_after_me(entry); + } + self.free_memory_size += size; + self.chain_entry_to_free_list(entry, Some(old_size)); + 
self.chain_entry_to_free_list(new_entry, None);
+            Ok(())
+        }
+    }
+
+    pub fn allocate(
+        &mut self,
+        size: usize,
+        align_order: usize,
+    ) -> Result<usize, MemoryAllocationError> {
+        if size == 0 || self.free_memory_size <= size {
+            return Err(MemoryAllocationError::InvalidSize);
+        }
+        let page_order = Self::size_to_page_order(size);
+        for i in page_order..Self::NUM_OF_FREE_LIST {
+            let first_entry = if let Some(t) = self.free_list[i] {
+                unsafe { &mut *t }
+            } else {
+                continue;
+            };
+
+            for entry in first_entry.list_iter_mut() {
+                if entry.get_size() >= size {
+                    let address_to_allocate = if align_order != 0 {
+                        let (aligned_address, aligned_available_size) =
+                            Self::align_address_and_available_size(
+                                entry.get_start_address(),
+                                entry.get_size(),
+                                align_order,
+                            );
+                        if aligned_available_size < size {
+                            continue;
+                        }
+                        aligned_address
+                    } else {
+                        entry.get_start_address()
+                    };
+                    self.define_used_memory(address_to_allocate, size, 0, &mut Some(entry))?;
+                    return Ok(address_to_allocate);
+                }
+            }
+        }
+        Err(MemoryAllocationError::AddressNotAvailable)
+    }
+
+    pub fn free(&mut self, start_address: usize, size: usize) -> Result<(), MemoryAllocationError> {
+        if self.free_memory_size == 0 {
+            let first_entry = self.create_memory_entry()?;
+
+            first_entry.init();
+            first_entry.set_range(
+                start_address,
+                Self::size_to_end_address(size, start_address),
+            );
+            first_entry.set_enabled();
+            self.chain_entry_to_free_list(first_entry, None);
+            self.first_entry = first_entry;
+            self.free_memory_size = size;
+        } else {
+            self.define_free_memory(start_address, size)?;
+        }
+        return Ok(());
+    }
+
+    fn unchain_entry_from_free_list(&mut self, entry: &mut MemoryEntry) {
+        let order = Self::size_to_page_order(entry.get_size());
+        if self.free_list[order] == Some(entry as *mut _) {
+            self.free_list[order] = entry.list_next;
+        }
+        entry.unchain_from_freelist();
+    }
+
+    fn chain_entry_to_free_list(&mut self, entry: &mut MemoryEntry, old_size: Option<usize>) {
+        let new_order = Self::size_to_page_order(entry.get_size());
+        if let Some(old_size) = old_size {
+            if old_size == entry.get_size() {
+                return;
+            }
+            let old_order = Self::size_to_page_order(old_size);
+            if self.free_list[old_order] == Some(entry as *mut _) {
+                self.free_list[old_order] = entry.list_next;
+            }
+            entry.unchain_from_freelist();
+        }
+        assert_eq!(entry.list_next, None);
+        assert_eq!(entry.list_prev, None);
+
+        if self.free_list[new_order].is_none() {
+            self.free_list[new_order] = Some(entry as *mut _);
+        } else {
+            let mut list_entry: &mut MemoryEntry =
+                unsafe { &mut *self.free_list[new_order].unwrap() };
+            if list_entry.get_size() >= entry.get_size() {
+                list_entry.list_prev = Some(entry as *mut _);
+                entry.list_next = Some(list_entry as *mut _);
+                self.free_list[new_order] = Some(entry as *mut _);
+            } else {
+                loop {
+                    if let Some(next_entry) =
+                        list_entry.list_next.and_then(|n| Some(unsafe { &mut *n }))
+                    {
+                        if next_entry.get_size() >= entry.get_size() {
+                            list_entry.list_next = Some(entry as *mut _);
+                            entry.list_prev = Some(list_entry as *mut _);
+                            entry.list_next = Some(next_entry as *mut _);
+                            next_entry.list_prev = Some(entry as *mut _);
+                            break;
+                        }
+                        list_entry = next_entry;
+                    } else {
+                        list_entry.list_next = Some(entry as *mut _);
+                        entry.list_prev = Some(list_entry as *mut _);
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    #[inline]
+    const fn size_to_page_order(size: usize) -> usize {
+        let mut order = 0;
+        while size > (1 << (order + PAGE_SHIFT)) {
+            order += 1;
+            if order == Self::NUM_OF_FREE_LIST - 1 {
+                return order;
+            }
+        }
+        order
+    }
+
+    #[inline]
+    const fn 
align_address_and_size( + address: usize, + size: usize, + align_order: usize, + ) -> (usize /* address */, usize /* size */) { + let align_size = 1 << align_order; + let mask = !(align_size - 1); + let aligned_address = address & mask; + let aligned_size = ((size + (address - aligned_address) - 1) & mask) + align_size; + (aligned_address, aligned_size) + } + + #[inline] + const fn align_address_and_available_size( + start_address: usize, + size: usize, + align_order: usize, + ) -> (usize /* address */, usize /* size */) { + if start_address == 0 { + return (0, size); + } + let align_size = 1 << align_order; + let mask = !(align_size - 1); + let aligned_address = ((start_address - 1) & mask) + align_size; + assert!(aligned_address >= start_address); + if size > (aligned_address - start_address) { + (aligned_address, size - (aligned_address - start_address)) + } else { + (aligned_address, 0) + } + } + + const fn size_to_end_address(size: usize, start_address: usize) -> usize { + start_address + size - 1 + } + + const fn size_from_address(start_address: usize, end_address: usize) -> usize { + assert!(start_address <= end_address); + end_address - start_address + 1 + } +} + +impl MemoryEntry { + pub fn init(&mut self) { + self.previous = None; + self.next = None; + self.list_prev = None; + self.list_next = None; + } + + pub fn delete(&mut self) { + if let Some(previous) = self.get_prev_entry() { + if let Some(next) = self.get_next_entry() { + previous.chain_after_me(next); + } else { + previous.unset_next_entry(); + } + } else if let Some(next) = self.get_next_entry() { + next.unset_prev_entry(); + } + self.previous = None; + self.next = None; + self.set_disabled(); + } + + pub fn set_enabled(&mut self) { + self.enabled = true; + } + + pub fn set_disabled(&mut self) { + self.enabled = false; + } + + pub fn is_enabled(&self) -> bool { + self.enabled + } + + pub fn set_range(&mut self, start: usize, end: usize) { + assert!(start < end); + self.start = start; + self.end = end; + } + + pub fn get_start_address(&self) -> usize { + self.start + } + + pub fn get_end_address(&self) -> usize { + self.end + } + + pub fn get_prev_entry(&self) -> Option<&'static mut Self> { + if let Some(previous) = self.previous { + unsafe { Some(&mut *previous) } + } else { + None + } + } + + pub fn set_prev_entry(&mut self, prev: &mut Self) { + self.previous = Some(prev as *mut _); + } + + pub fn unset_prev_entry(&mut self) { + self.previous = None; + } + + pub fn get_next_entry(&self) -> Option<&'static mut Self> { + if let Some(next) = self.next { + unsafe { Some(&mut *next) } + } else { + None + } + } + + pub fn set_next_entry(&mut self, next: &mut Self) { + self.next = Some(next as *mut _); + } + + pub fn unset_next_entry(&mut self) { + self.next = None; + } + + pub fn get_size(&self) -> usize { + MemoryAllocator::size_from_address(self.start, self.end) + } + + pub fn chain_after_me(&mut self, entry: &mut Self) { + self.next = Some(entry as *mut _); + entry.previous = Some(self as *mut _); + } + + pub fn is_first_entry(&self) -> bool { + self.previous == None + } + + pub fn unchain_from_freelist(&mut self) { + if let Some(prev_address) = self.list_prev { + let prev_entry = unsafe { &mut *prev_address }; + prev_entry.list_next = self.list_next; + } + if let Some(next_address) = self.list_next { + let next_entry = unsafe { &mut *next_address }; + next_entry.list_prev = self.list_prev; + } + self.list_next = None; + self.list_prev = None; + } + + pub fn list_iter_mut(&mut self) -> FreeListIterMut { + FreeListIterMut 
{
+            entry: Some(self as *mut _),
+        }
+    }
+}
+
+impl Iterator for FreeListIterMut {
+    type Item = &'static mut MemoryEntry;
+    fn next(&mut self) -> Option<Self::Item> {
+        if let Some(address) = self.entry {
+            let entry = unsafe { &mut *(address as *mut MemoryEntry) };
+            self.entry = entry.list_next; /* ATTENTION: get **free_list's** next */
+            Some(entry)
+        } else {
+            None
+        }
+    }
+}
diff --git a/src/common/src/paging.rs b/src/common/src/paging.rs
index f9f131e..4b9f82a 100644
--- a/src/common/src/paging.rs
+++ b/src/common/src/paging.rs
@@ -9,12 +9,12 @@
 //! Paging
 //!
 
-use crate::bitmask;
 use crate::cpu::{
     get_mair_el2, TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H, TCR_EL2_DS_WITHOUT_E2H,
     TCR_EL2_T0SZ_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_T0SZ_WITHOUT_E2H,
     TCR_EL2_TG0_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_TG0_WITHOUT_E2H,
 };
+use crate::{bitmask, PAGE_MASK, PAGE_SIZE, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE};
 
 pub const PAGE_TABLE_SIZE: usize = 0x1000;
 
@@ -127,7 +127,13 @@ pub const fn table_level_to_table_shift(
     translation_granule_shift + 9 * (3 - table_level) as usize
 }
 
-/// 現時点ではTTBR0_EL2のみ対応
+/// Get the initial lookup level of the page table pointed to by TTBR0_EL2
+///
+/// # Arguments
+/// * `tcr_el2` - the TCR_EL2 value used to calculate the initial level
+///
+/// # Result
+/// Returns (initial lookup level of the page table, the shift width of the first level table's granule)
 pub const fn get_initial_page_table_level_and_bits_to_shift(tcr_el2: u64) -> (i8, usize) {
     let tcr_el2_ds =
         ((tcr_el2 & TCR_EL2_DS_WITHOUT_E2H) >> TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H) as u8;
@@ -173,3 +179,15 @@ pub const fn calculate_number_of_concatenated_page_tables(
         2u8.pow(((43 - ((3 - initial_lookup_level) as u8) * 9) - t0sz) as u32)
     }
 }
+
+pub const fn page_align_up(size: usize) -> usize {
+    //assert_ne!(size, 0);
+    assert!(size != 0);
+    ((size - 1) & PAGE_MASK) + PAGE_SIZE
+}
+
+pub const fn stage2_page_align_up(size: usize) -> usize {
+    //assert_ne!(size, 0);
+    assert!(size != 0);
+    ((size - 1) & STAGE_2_PAGE_MASK) + STAGE_2_PAGE_SIZE
+}
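Since `page_align_up` rounds a size up to the next page boundary, a quick sanity check of the new helper (values follow from the `PAGE_SIZE = 0x1000` defined in lib.rs above; this snippet is illustrative, not part of the patch):

```rust
// Rounding behavior of page_align_up with PAGE_SIZE = 0x1000:
assert_eq!(page_align_up(1), 0x1000);      // a partial page rounds up to one page
assert_eq!(page_align_up(0x1000), 0x1000); // an exact multiple is unchanged
assert_eq!(page_align_up(0x1001), 0x2000); // one byte over rounds to the next page
```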
diff --git a/src/common/src/smmu.rs b/src/common/src/smmu.rs
index 5e71a3e..1abac1c 100644
--- a/src/common/src/smmu.rs
+++ b/src/common/src/smmu.rs
@@ -1,4 +1,5 @@
 // Copyright (c) 2022 RIKEN
+// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST)
 // All rights reserved.
 //
 // This software is released under the MIT License.
@@ -23,10 +24,52 @@
 pub const SMMU_IDR5: usize = 0x14;
 pub const SMMU_CR0: usize = 0x20;
 pub const SMMU_CR0ACK: usize = 0x24;
 pub const SMMU_CR1: usize = 0x28;
+pub const SMMU_CR2: usize = 0x2C;
 pub const SMMU_GBPA: usize = 0x44;
+pub const SMMU_AGBPA: usize = 0x48;
+pub const SMMU_IRQ_CTRL: usize = 0x50;
+pub const SMMU_IRQ_CTRLACK: usize = 0x54;
+pub const SMMU_GERRORN: usize = 0x64;
+pub const SMMU_GERROR_IRQ_CFG0: usize = 0x68;
+pub const SMMU_GERROR_IRQ_CFG1: usize = 0x70;
+pub const SMMU_GERROR_IRQ_CFG2: usize = 0x74;
 pub const SMMU_STRTAB_BASE: usize = 0x80;
+pub const SMMU_STRTAB_BASE_HIGH: usize = 0x84;
 pub const SMMU_STRTAB_BASE_CFG: usize = 0x88;
-
+pub const SMMU_CMDQ_BASE: usize = 0x90;
+pub const SMMU_CMDQ_PROD: usize = 0x98;
+pub const SMMU_CMDQ_CONS: usize = 0x9C;
+pub const SMMU_EVENTQ_BASE: usize = 0xA0;
+pub const SMMU_EVENTQ_PROD_ALIAS: usize = 0xA8;
+pub const SMMU_EVENTQ_CONS_ALIAS: usize = 0xAC;
+pub const SMMU_EVENTQ_IRQ_CFG0: usize = 0xB0;
+pub const SMMU_EVENTQ_IRQ_CFG1: usize = 0xB8;
+pub const SMMU_EVENTQ_IRQ_CFG2: usize = 0xBC;
+pub const SMMU_PRIQ_BASE: usize = 0xC0;
+pub const SMMU_PRIQ_IRQ_CFG0: usize = 0xD0;
+pub const SMMU_PRIQ_IRQ_CFG1: usize = 0xD8;
+pub const SMMU_PRIQ_IRQ_CFG2: usize = 0xDC;
+pub const SMMU_GATOS_CTRL: usize = 0x100;
+pub const SMMU_GATOS_SID: usize = 0x0108;
+pub const SMMU_GATOS_ADDR: usize = 0x0110;
+pub const SMMU_GATOS_PAR: usize = 0x0118;
+pub const SMMU_GMPAM: usize = 0x0138;
+pub const SMMU_GBPMPAM: usize = 0x013C;
+pub const SMMU_VATOS_SEL: usize = 0x0180;
+
+/*
+pub const SMMU_VATOS_SEL: usize = 0x0180;
+pub const SMMU_VATOS_SID: usize = 0x0A08;
+pub const SMMU_VATOS_ADDR: usize = 0x0A10;
+pub const SMMU_VATOS_PAR: usize = 0x0A18;
+*/
+
+pub const SMMU_CMDQ_CONTROL_PAGE_BASE: usize = 0x4000;
+pub const SMMU_CMDQ_CONTROL_PAGE_BASE_END: usize = 0x4000 + 32 * 255;
+
+pub const SMMU_IDR0_VATOS: u32 = 1 << 20;
+pub const SMMU_IDR0_CD2L: u32 = 1 << 19;
+pub const SMMU_IDR0_VMID16: u32 = 1 << 18;
 pub const SMMU_IDR0_HYP: u32 = 1 << 9;
 pub const SMMU_IDR0_S1P: u32 = 1 << 1;
 pub const SMMU_IDR0_S2P: u32 = 1 << 0;
@@ -37,22 +80,58 @@ pub const SMMU_IDR0_ST_LEVEL: u32 = 0b11 << SMMU_IDR0_ST_LEVEL_BITS_OFFSET;
 
 pub const SMMU_IDR5_GRAN4K: u32 = 1 << 4;
 
-pub const SMMU_CR0_SMMUEN: u32 = 1 << 0;
+pub const SMMU_CR0_SMMUEN_BIT_OFFSET: u32 = 0;
+pub const SMMU_CR0_SMMUEN: u32 = 1 << SMMU_CR0_SMMUEN_BIT_OFFSET;
+pub const SMMU_CR0_EVENTQEN: u32 = 1 << 2;
+pub const SMMU_CR0_VMW: u32 = 0b111 << 6;
 
 pub const SMMU_CR1_TABLE_SH_BITS_OFFSET: u32 = 10;
+pub const SMMU_CR1_QUEUE_SH: u32 = 0b11 << 4;
+pub const SMMU_CR1_QUEUE_OC: u32 = 0b11 << 2;
+pub const SMMU_CR1_QUEUE_IC: u32 = 0b11 << 0;
+
+pub const SMMU_CR2_E2H: u32 = 1;
+
+pub const SMMU_STRTAB_BASE_ADDRESS: u64 = bitmask!(51, 6);
 
-pub const SMMU_GBPA_SHCFG_BITS_OFFSET: u32 = 12;
+pub const SMMU_STRTAB_BASE_CFG_FMT_BITS_OFFSET: u32 = 16;
+pub const SMMU_STRTAB_BASE_CFG_FMT: u32 = 0b11 << SMMU_STRTAB_BASE_CFG_FMT_BITS_OFFSET;
+pub const SMMU_STRTAB_BASE_CFG_FMT_2LEVEL: u32 = 0b01 << SMMU_STRTAB_BASE_CFG_FMT_BITS_OFFSET;
+pub const SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET: u32 = 6;
+pub const SMMU_STRTAB_BASE_CFG_SPLIT: u32 = 0b11111 << SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET;
+pub const SMMU_STRTAB_BASE_CFG_LOG2SIZE_BITS_OFFSET: u32 = 0;
+pub const SMMU_STRTAB_BASE_CFG_LOG2SIZE: u32 =
+    0b111111 << SMMU_STRTAB_BASE_CFG_LOG2SIZE_BITS_OFFSET;
 
-type SteArrayBaseType = u64;
+pub const SMMU_GBPA_UPDATE: u32 = 1 << 31;
+pub const SMMU_GBPA_ABORT: u32 = 1 << 20;
+pub const SMMU_GBPA_SHCFG_INCOMING: u32 = 0b01 << 12;
+
+pub const SMMU_VATOS_SID_SUBSTREAM_ID: u64 = bitmask!(51, 32);
+
+pub type SteArrayBaseType = u64;
 
 const STE_ARRAY_BASE_TYPE_BITS: SteArrayBaseType =
     (core::mem::size_of::<SteArrayBaseType>() * 8) as SteArrayBaseType;
 
-const STE_V: SteArrayBaseType = 1 << 0;
+pub const STE_V_INDEX: usize = 0;
+pub const STE_V: SteArrayBaseType = 1 << 0;
 const STE_CONFIG_OFFSET: SteArrayBaseType = 1;
-const STE_CONFIG_INDEX: usize = (1 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+pub const STE_CONFIG_INDEX: usize = (1 / STE_ARRAY_BASE_TYPE_BITS) as usize;
 const STE_CONFIG: SteArrayBaseType = 0b111 << STE_CONFIG_OFFSET;
 
+const STE_S2HWU_OFFSET: SteArrayBaseType = 72 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2HWU_INDEX: usize = (72 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2HWU: SteArrayBaseType = 0b1111 << STE_S2HWU_OFFSET;
+
+const STE_S2FWB_OFFSET: SteArrayBaseType = 89 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2FWB_INDEX: usize = (89 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2FWB: SteArrayBaseType = 0b1 << STE_S2FWB_OFFSET;
+
+const STE_S2VMID_OFFSET: SteArrayBaseType = 128 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2VMID_INDEX: usize = (128 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2VMID: SteArrayBaseType = bitmask!(143 - 128, 0) << STE_S2VMID_OFFSET;
+
 const STE_S2T0SZ_OFFSET: SteArrayBaseType = 160 % STE_ARRAY_BASE_TYPE_BITS;
 const STE_S2T0SZ_INDEX: usize = (160 / STE_ARRAY_BASE_TYPE_BITS) as usize;
 const STE_S2T0SZ: SteArrayBaseType = 0b111111 << STE_S2T0SZ_OFFSET;
@@ -85,6 +164,42 @@ const STE_S2AA64_OFFSET: SteArrayBaseType = 179 % STE_ARRAY_BASE_TYPE_BITS;
 const STE_S2AA64_INDEX: usize = (179 / STE_ARRAY_BASE_TYPE_BITS) as usize;
 const STE_S2AA64: SteArrayBaseType = 0b1 << STE_S2AA64_OFFSET;
 
+const STE_S2ENDI_OFFSET: SteArrayBaseType = 180 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2ENDI_INDEX: usize = (180 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2ENDI: SteArrayBaseType = 0b1 << STE_S2ENDI_OFFSET;
+
+const STE_S2AFFD_OFFSET: SteArrayBaseType = 181 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2AFFD_INDEX: usize = (181 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2AFFD: SteArrayBaseType = 0b1 << STE_S2AFFD_OFFSET;
+
+const STE_S2PTW_OFFSET: SteArrayBaseType = 182 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2PTW_INDEX: usize = (182 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2PTW: SteArrayBaseType = 0b1 << STE_S2PTW_OFFSET;
+
+const STE_S2HD_OFFSET: SteArrayBaseType = 183 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2HD_INDEX: usize = (183 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2HD: SteArrayBaseType = 0b1 << STE_S2HD_OFFSET;
+
+const STE_S2HA_OFFSET: SteArrayBaseType = 184 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2HA_INDEX: usize = (184 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2HA: SteArrayBaseType = 0b1 << STE_S2HA_OFFSET;
+
+const STE_S2S_OFFSET: SteArrayBaseType = 185 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2S_INDEX: usize = (185 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2S: SteArrayBaseType = 0b1 << STE_S2S_OFFSET;
+
+const STE_S2R_OFFSET: SteArrayBaseType = 186 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2R_INDEX: usize = (186 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2R: SteArrayBaseType = 0b1 << STE_S2R_OFFSET;
+
+const STE_S2NSW_OFFSET: SteArrayBaseType = 192 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2NSW_INDEX: usize = (192 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2NSW: SteArrayBaseType = 0b1 << STE_S2NSW_OFFSET;
+
+const STE_S2NSA_OFFSET: SteArrayBaseType = 193 % STE_ARRAY_BASE_TYPE_BITS;
+const STE_S2NSA_INDEX: usize = (193 / STE_ARRAY_BASE_TYPE_BITS) as usize;
+const STE_S2NSA: SteArrayBaseType = 0b1 << STE_S2NSA_OFFSET;
+
 const STE_S2TTB_OFFSET: SteArrayBaseType = 196 % STE_ARRAY_BASE_TYPE_BITS;
 const STE_S2TTB_INDEX: usize = (196 / STE_ARRAY_BASE_TYPE_BITS) as usize;
 const STE_S2TTB: SteArrayBaseType = (bitmask!(51, 4) >> 4) << STE_S2TTB_OFFSET;
@@ -98,18 +213,56 @@ impl StreamTableEntry {
         Self([0; 8])
     }
 
+    pub fn is_validated(&self) -> bool {
+        (self.0[STE_V_INDEX] & STE_V) != 0
+    }
+
     pub fn validate(&mut self) {
-        self.0[0] |= STE_V;
+        self.0[STE_V_INDEX] |= STE_V;
+    }
+
+    pub fn get_config(&self) -> SteArrayBaseType {
+        (self.0[STE_CONFIG_INDEX] & STE_CONFIG) >> STE_CONFIG_OFFSET
     }
 
-    pub fn set_config(&mut self, is_stage1_bypassed: bool, is_stage2_bypassed: bool) {
+    pub fn is_stage1_bypassed(&self) -> bool {
+        (((self.0[STE_CONFIG_INDEX] & STE_CONFIG) >> STE_CONFIG_OFFSET) & 1) == 0
+    }
+
+    pub fn is_traffic_can_pass(&self) -> bool {
+        ((self.0[STE_CONFIG_INDEX] >> STE_CONFIG_OFFSET) & 0b100) != 0
+    }
+
+    pub fn set_config(
+        &mut self,
+        is_traffic_can_pass: bool,
+        is_stage1_bypassed: bool,
+        is_stage2_bypassed: bool,
+    ) {
         self.0[STE_CONFIG_INDEX] = (self.0[STE_CONFIG_INDEX] & (!STE_CONFIG))
-            | ((0b100
-                | (!is_stage1_bypassed as SteArrayBaseType)
-                | ((!is_stage2_bypassed as SteArrayBaseType) << 1))
+            | ((((is_traffic_can_pass as SteArrayBaseType) << 2)
+                | ((!is_stage2_bypassed as SteArrayBaseType) << 1)
+                | (!is_stage1_bypassed as SteArrayBaseType))
                 << STE_CONFIG_OFFSET);
     }
 
+    pub fn set_s2hwu(&mut self, hwu: u8) {
+        assert!(hwu <= 0b1111);
+        self.0[STE_S2HWU_INDEX] = (self.0[STE_S2HWU_INDEX] & (!STE_S2HWU))
+            | ((hwu as SteArrayBaseType) << STE_S2HWU_OFFSET);
+    }
+
+    pub fn set_s2fwb(&mut self, fwb: u8) {
+        assert!(fwb < 2);
+        self.0[STE_S2FWB_INDEX] = (self.0[STE_S2FWB_INDEX] & (!STE_S2FWB))
+            | ((fwb as SteArrayBaseType) << STE_S2FWB_OFFSET);
+    }
+
+    pub fn set_s2vmid(&mut self, vmid: u16) {
+        self.0[STE_S2VMID_INDEX] = (self.0[STE_S2VMID_INDEX] & (!STE_S2VMID))
+            | ((vmid as SteArrayBaseType) << STE_S2VMID_OFFSET);
+    }
+
     pub fn set_s2t0sz(&mut self, s2t0sz: u32) {
         self.0[STE_S2T0SZ_INDEX] = (self.0[STE_S2T0SZ_INDEX] & (!STE_S2T0SZ))
             | ((s2t0sz as SteArrayBaseType) << STE_S2T0SZ_OFFSET);
@@ -164,10 +317,209 @@
             | ((is_aa64 as SteArrayBaseType) << STE_S2AA64_OFFSET);
     }
 
+    pub fn set_s2endi(&mut self, is_big_endian: bool) {
+        self.0[STE_S2ENDI_INDEX] = (self.0[STE_S2ENDI_INDEX] & (!STE_S2ENDI))
+            | ((is_big_endian as SteArrayBaseType) << STE_S2ENDI_OFFSET);
+    }
+
+    pub fn set_s2affd(&mut self, access_flag_fault_never_occurs: bool) {
+        self.0[STE_S2AFFD_INDEX] = (self.0[STE_S2AFFD_INDEX] & (!STE_S2AFFD))
+            | ((access_flag_fault_never_occurs as SteArrayBaseType) << STE_S2AFFD_OFFSET);
+    }
+
+    pub fn set_s2ptw(&mut self, ptw: u8) {
+        assert!(ptw < 2);
+        self.0[STE_S2PTW_INDEX] = (self.0[STE_S2PTW_INDEX] & (!STE_S2PTW))
+            | ((ptw as SteArrayBaseType) << STE_S2PTW_OFFSET);
+    }
+
+    pub fn set_s2hd(&mut self, hd: u8) {
+        assert!(hd < 2);
+        self.0[STE_S2HD_INDEX] =
+            (self.0[STE_S2HD_INDEX] & (!STE_S2HD)) | ((hd as SteArrayBaseType) << STE_S2HD_OFFSET);
+    }
+
+    pub fn set_s2ha(&mut self, ha: u8) {
+        assert!(ha <= 0b11);
+        self.0[STE_S2HA_INDEX] =
+            (self.0[STE_S2HA_INDEX] & (!STE_S2HA)) | ((ha as SteArrayBaseType) << STE_S2HA_OFFSET);
+    }
+
+    pub fn set_s2s(&mut self, should_stall: bool) {
+        self.0[STE_S2S_INDEX] = (self.0[STE_S2S_INDEX] & (!STE_S2S))
+            | ((should_stall as SteArrayBaseType) << STE_S2S_OFFSET);
+    }
+
+    pub fn set_s2r(&mut self, should_record: bool) {
+        self.0[STE_S2R_INDEX] = (self.0[STE_S2R_INDEX] & (!STE_S2R))
+            | ((should_record as SteArrayBaseType) << STE_S2R_OFFSET);
+    }
+
+    pub fn set_s2nsa(&mut self, nsa: u8) {
+        assert!(nsa < 2);
+        self.0[STE_S2NSA_INDEX] = (self.0[STE_S2NSA_INDEX] & (!STE_S2NSA))
+            | ((nsa as SteArrayBaseType) << STE_S2NSA_OFFSET);
+    }
+
     pub fn set_stage2_translation_table(&mut self, table_address: usize) {
         assert_eq!(table_address & !(bitmask!(51, 4)), 0);
         self.0[STE_S2TTB_INDEX] =
             (self.0[STE_S2TTB_INDEX] & (!STE_S2TTB)) | (table_address as SteArrayBaseType);
         self.set_s2aa64(true);
     }
+
+    /// ATTENTION: this function does not validate the STE
+    pub fn set_stage2_settings(
+        &mut self,
+        vtcr_el2: u64,
+        vttbr_el2: u64,
+        is_traffic_can_pass: bool,
+        is_stage1_bypassed: bool,
+    ) {
+        use crate::cpu::{
+            VTCR_EL2_PS, VTCR_EL2_PS_BITS_OFFSET, VTCR_EL2_SL0, VTCR_EL2_SL0_BITS_OFFSET,
+            VTCR_EL2_T0SZ, VTCR_EL2_T0SZ_BITS_OFFSET,
+        };
+        use crate::STAGE_2_PAGE_SIZE;
+
+        self.set_s2hwu(0b0000);
+        self.set_s2fwb(0);
+        self.set_s2vmid(0);
+        self.set_s2t0sz(((vtcr_el2 & VTCR_EL2_T0SZ) >> VTCR_EL2_T0SZ_BITS_OFFSET) as u32);
+        self.set_s2sl0(((vtcr_el2 & VTCR_EL2_SL0) >> VTCR_EL2_SL0_BITS_OFFSET) as u32);
+        self.set_s2ir0(false, true);
+        self.set_s2or0(false, true);
+        self.set_s2tg(STAGE_2_PAGE_SIZE);
+        self.set_s2sh0(Shareability::NonShareable);
+        self.set_s2ps(((vtcr_el2 & VTCR_EL2_PS) >> VTCR_EL2_PS_BITS_OFFSET) as u8);
+        self.set_s2aa64(true);
+        self.set_s2endi(false);
+        self.set_s2affd(true);
+        self.set_s2ptw(0);
+        self.set_s2hd(0);
+        self.set_s2ha(0);
+        self.set_s2s(false); // TODO:
+        self.set_s2r(false); // TODO:
+        self.set_s2nsa(0);
+        self.set_stage2_translation_table(vttbr_el2 as usize);
+        self.set_config(is_traffic_can_pass, is_stage1_bypassed, false);
+    }
+}
+
+/// Check whether a write of `data` at the `offset`-th u64 of an STE touches stage 2 settings
+///
+/// Note: this function returns false for the STE.Config field itself.
+pub fn is_offset_configuration_about_stage2(offset: usize, data: SteArrayBaseType) -> bool {
+    assert_eq!(core::mem::size_of::<SteArrayBaseType>(), 8);
+    match offset {
+        1 => {
+            let mask: SteArrayBaseType = (0b1111 << (72 - 64)) | (1 << (89 - 64));
+            (data & mask) != 0
+        }
+        2 | 3 => true,
+        _ => false,
+    }
+}
+
+pub const fn get_level1_table_size(log2_size: u32, split: u32) -> usize {
+    8usize * (log2_size - split) as usize
+}
+
+pub const fn get_level2_table_size(span: u64, _split: u32) -> usize {
+    (1usize << (span - 1)) * core::mem::size_of::<StreamTableEntry>()
+}
+
+pub fn create_bitmask_of_stage2_configurations(ste_offset: usize) -> u64 {
+    use core::mem::size_of;
+    let start_offset = ste_offset / size_of::<SteArrayBaseType>();
+    let end_offset = (ste_offset + size_of::<u64>()) / size_of::<SteArrayBaseType>();
+    let mut mask: u64 = 0;
+
+    for i in start_offset..end_offset {
+        let m = _create_bitmask_of_stage2_configurations(i) as u64;
+        let i_byte_offset = i * size_of::<SteArrayBaseType>();
+        if i_byte_offset >= ste_offset {
+            mask |= m << (i_byte_offset - ste_offset);
+        } else {
+            mask |= m >> (ste_offset - i_byte_offset);
+        }
+    }
+    mask
+}
+
+/// I believe the compiler will make this function more clever...
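+/// For instance (following the index constants above), `_create_bitmask_of_stage2_configurations(1)`
+/// should yield exactly `STE_S2HWU | STE_S2FWB`, since those are the only stage 2 fields that live
+/// in the second 64-bit word of the STE (this example is illustrative, not from the original patch).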
+/// I believe the compiler will make this function more clever... +const fn _create_bitmask_of_stage2_configurations( + ste_offset_by_array_base_type: usize, +) -> SteArrayBaseType { + let mut mask: SteArrayBaseType = 0; + if ste_offset_by_array_base_type == STE_CONFIG_INDEX { + mask |= 0b010 << STE_CONFIG_OFFSET; + } + if ste_offset_by_array_base_type == STE_S2HWU_INDEX { + mask |= STE_S2HWU; + } + if ste_offset_by_array_base_type == STE_S2FWB_INDEX { + mask |= STE_S2FWB; + } + if ste_offset_by_array_base_type == STE_S2VMID_INDEX { + mask |= STE_S2VMID; + } + if ste_offset_by_array_base_type == STE_S2T0SZ_INDEX { + mask |= STE_S2T0SZ; + } + if ste_offset_by_array_base_type == STE_S2SL0_INDEX { + mask |= STE_S2SL0; + } + if ste_offset_by_array_base_type == STE_S2IR0_INDEX { + mask |= STE_S2IR0; + } + if ste_offset_by_array_base_type == STE_S2OR0_INDEX { + mask |= STE_S2OR0; + } + if ste_offset_by_array_base_type == STE_S2SH0_INDEX { + mask |= STE_S2SH0; + } + if ste_offset_by_array_base_type == STE_S2TG_INDEX { + mask |= STE_S2TG; + } + if ste_offset_by_array_base_type == STE_S2PS_INDEX { + mask |= STE_S2PS; + } + if ste_offset_by_array_base_type == STE_S2AA64_INDEX { + mask |= STE_S2AA64; + } + if ste_offset_by_array_base_type == STE_S2ENDI_INDEX { + mask |= STE_S2ENDI; + } + if ste_offset_by_array_base_type == STE_S2AFFD_INDEX { + mask |= STE_S2AFFD; + } + if ste_offset_by_array_base_type == STE_S2PTW_INDEX { + mask |= STE_S2PTW; + } + if ste_offset_by_array_base_type == STE_S2HD_INDEX { + mask |= STE_S2HD; + } + if ste_offset_by_array_base_type == STE_S2HA_INDEX { + mask |= STE_S2HA; + } + if ste_offset_by_array_base_type == STE_S2S_INDEX { + mask |= STE_S2S; + } + if ste_offset_by_array_base_type == STE_S2R_INDEX { + mask |= STE_S2R; + } + if ste_offset_by_array_base_type == STE_S2NSW_INDEX { + mask |= STE_S2NSW; + } + if ste_offset_by_array_base_type == STE_S2NSA_INDEX { + mask |= STE_S2NSA; + } + if ste_offset_by_array_base_type == STE_S2TTB_INDEX { + mask |= STE_S2TTB; + } + + mask } diff --git a/src/common/src/spin_flag.rs b/src/common/src/spin_flag.rs new file mode 100644 index 0000000..b19aac8 --- /dev/null +++ b/src/common/src/spin_flag.rs @@ -0,0 +1,42 @@ +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) +// All rights reserved. +// +// This software is released under the MIT License. +// http://opensource.org/licenses/mit-license.php + +use core::sync::atomic::{AtomicBool, Ordering}; + +pub struct SpinLockFlag(AtomicBool); + +impl SpinLockFlag { + pub const fn new() -> Self { + Self(AtomicBool::new(false)) + } + + #[inline(always)] + pub fn try_lock_weak(&self) -> Result<(), ()> { + self.0 + .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) + .map(|_| ()) + .map_err(|_| ()) + } + + #[inline(always)] + pub fn lock(&self) { + while self.try_lock_weak().is_err() { + while self.is_locked() { + core::hint::spin_loop(); + } + } + } + + #[inline(always)] + pub fn unlock(&self) { + self.0.store(false, Ordering::Release) + } + + #[inline(always)] + pub fn is_locked(&self) -> bool { + self.0.load(Ordering::Relaxed) + } +}
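A minimal usage sketch for the SpinLockFlag above; the static and the guarded counter are illustrative, not part of this patch. `lock()` is a test-and-test-and-set loop: a weak CAS to acquire, then read-only polling while contended, which keeps cache-line traffic down.

```rust
// Illustrative only: guard a mutable static with the new lock.
use common::spin_flag::SpinLockFlag;

static LOCK: SpinLockFlag = SpinLockFlag::new();
static mut SHARED_COUNTER: u64 = 0;

fn increment_shared() {
    LOCK.lock(); // acquire: weak CAS, then relaxed-load spinning while contended
    unsafe { SHARED_COUNTER += 1 };
    LOCK.unlock(); // Release store pairs with the Acquire CAS in lock()
}
```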
diff --git a/src/common/src/stack_memory_allocator.rs b/src/common/src/stack_memory_allocator.rs new file mode 100644 index 0000000..b609b70 --- /dev/null +++ b/src/common/src/stack_memory_allocator.rs @@ -0,0 +1,98 @@ +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) +// All rights reserved. +// +// This software is released under the MIT License. +// http://opensource.org/licenses/mit-license.php + +//! +//! Stack Style Memory Allocator +//! + +use crate::{paging::page_align_up, MemoryAllocationError, PAGE_SHIFT}; + +/// Stack Style Memory Allocator +/// +/// The memory allocator which can allocate memory with alignment. +/// +/// If you modify the members, please adjust [`Self::init`] +pub struct MemoryAllocator { + base_address: usize, + available_pages: usize, +} + +impl MemoryAllocator { + /// Set up self with the allocated address + /// + /// All members of Self are uninitialized. + /// Be careful when you assign a value into a member that implements Drop. + /// (Use `core::mem::forget(core::mem::replace(&mut self.member, new_value))`) + /// This function is used to reduce the stack usage. + /// (`aarch64-unknown-uefi` does not have __chkstk) + /// + /// # Arguments + /// * `self` - mutable reference of Self, it may be uninitialized + /// * `allocated_address` - the base address allocated + /// * `allocated_size` - the allocated size + pub fn init(&mut self, allocated_address: usize, allocated_size: usize) { + /* Initialize members */ + self.available_pages = allocated_size >> PAGE_SHIFT; + self.base_address = allocated_address; + } + + /* + /// Setup MemoryAllocator with allocated address, and return Self + pub fn create(mut allocated_address: usize, allocated_size: usize) -> Self { + use core::mem::MaybeUninit; + + let mut available_pages: usize = 0; + let mut pool: [MaybeUninit<usize>; ALLOC_SIZE >> PAGE_SHIFT] = MaybeUninit::uninit_array(); + let mut allocated_pages = allocated_size >> PAGE_SHIFT; + assert!(allocated_pages <= (ALLOC_SIZE >> PAGE_SHIFT)); + + while allocated_pages > 0 { + pool[available_pages].write(allocated_address); + available_pages += 1; + allocated_address += PAGE_SIZE; + allocated_pages -= 1; + } + + Self { + pool: unsafe { MaybeUninit::array_assume_init(pool) }, + available_pages, + } + } + */ + + fn _allocate_memory(&mut self, pages: usize) -> Result<usize, MemoryAllocationError> { + if self.available_pages < pages { + return Err(MemoryAllocationError::AddressNotAvailable); + } + self.available_pages -= pages; + return Ok(self.base_address + (self.available_pages << PAGE_SHIFT)); + } + + pub fn allocate(&mut self, size: usize, align: usize) -> Result<usize, MemoryAllocationError> { + if size == 0 { + return Err(MemoryAllocationError::InvalidSize); + } + let pages = page_align_up(size) >> PAGE_SHIFT; + if align <= PAGE_SHIFT { + return self._allocate_memory(pages); + } + let mut base = self._allocate_memory(pages)?; + while (base & ((1 << align) - 1)) != 0 { + base = self._allocate_memory(1)?; + } + return Ok(base); + } + + pub fn free(&mut self, _address: usize, _size: usize) -> Result<(), MemoryAllocationError> { + Ok(()) + } + + pub fn get_all_memory(&mut self) -> (usize /*base_address*/, usize /* number of pages*/) { + let number_of_pages = self.available_pages; + self.available_pages = 0; + (self.base_address, number_of_pages) + } +}
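A short usage sketch of the allocator above (addresses are illustrative). Allocation moves downward from the top of the pool: `_allocate_memory` decrements `available_pages` and returns the new top, and `get_all_memory` drains the pool so the remaining pages can be handed over elsewhere, as the bootloader later does for the hypervisor kernel.

```rust
// Illustrative: a 16-page pool at a fake base address.
fn allocator_example() {
    let mut allocator: MemoryAllocator = unsafe { core::mem::zeroed() };
    allocator.init(0x4000_0000, 16 << PAGE_SHIFT);

    // One page comes from the current top: base + 15 * PAGE_SIZE.
    let page = allocator.allocate(1 << PAGE_SHIFT, PAGE_SHIFT).unwrap();
    assert_eq!(page, 0x4000_0000 + (15 << PAGE_SHIFT));

    // Drain the rest, e.g. to pass it on as available_memory_info.
    let (base, remaining_pages) = allocator.get_all_memory();
    assert_eq!((base, remaining_pages), (0x4000_0000, 15));
}
```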
diff --git a/src/hypervisor_bootloader/Cargo.toml b/src/hypervisor_bootloader/Cargo.toml index e5b8f1b..35ab5f7 100644 --- a/src/hypervisor_bootloader/Cargo.toml +++ b/src/hypervisor_bootloader/Cargo.toml @@ -6,11 +6,11 @@ # http://opensource.org/licenses/mit-license.php [package] name = "hypervisor_bootloader" -version = "0.4.0" +version = "1.0.0" edition = "2021" [features] -default = ["smmu", "i210", "mt27800", "fast_restore", "acpi_table_protection", "contiguous_bit"] +default = ["smmu", "i210", "mt27800", "fast_restore", "acpi_table_protection", "contiguous_bit", "advanced_memory_manager"] smmu = [] i210 = [] mt27800 = [] @@ -18,6 +18,7 @@ fast_restore = [] acpi_table_protection = [] contiguous_bit = [] a64fx = [] +advanced_memory_manager = [] # Bootloader uses stack style allocator [dependencies] common = { path = "../common" } diff --git a/src/hypervisor_bootloader/src/console.rs b/src/hypervisor_bootloader/src/console.rs index 964b61c..6dca825 100644 --- a/src/hypervisor_bootloader/src/console.rs +++ b/src/hypervisor_bootloader/src/console.rs @@ -6,21 +6,19 @@ // http://opensource.org/licenses/mit-license.php //! -//! Console Input/Output Manager -//! -//! 主にUEFIコンソールの利用を想定 +//! Console with UEFI Output Protocol //! -use uefi::output::EfiOutputProtocol; -use uefi::EfiStatus; +//use common::spin_flag::SpinLockFlag; + +use uefi::{output::EfiOutputProtocol, EfiStatus}; use core::fmt; use core::mem::MaybeUninit; -use core::sync::atomic::{AtomicBool, Ordering}; pub struct Console { uefi_output_console: MaybeUninit<&'static EfiOutputProtocol>, - write_lock: AtomicBool, + //write_lock: SpinLockFlag, // Currently, the Bootloader runs only on the BSP, so the lock is not necessary. } pub static mut DEFAULT_CONSOLE: Console = Console::new(); @@ -29,7 +27,7 @@ impl Console { pub const fn new() -> Self { Self { uefi_output_console: MaybeUninit::uninit(), - write_lock: AtomicBool::new(false), + //write_lock: SpinLockFlag::new(), } } @@ -37,41 +35,21 @@ self.uefi_output_console = MaybeUninit::new(unsafe { &*efi_output_protocol }); } - fn acquire_write_lock(&self) { - loop { - if self - .write_lock - .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) - .is_ok() - { - return; - } - while self.write_lock.load(Ordering::Relaxed) { - core::hint::spin_loop(); - } - } - } - - fn release_write_lock(&self) { - self.write_lock.store(false, Ordering::Release) - } - /// For panic_handler pub unsafe fn force_release_write_lock(&self) { - self.release_write_lock(); + //self.write_lock.unlock(); } } impl fmt::Write for Console { - /// write_strはwrite_fmt内部で呼び出されます。 fn write_str(&mut self, string: &str) -> fmt::Result { - self.acquire_write_lock(); + //self.write_lock.lock(); let result = unsafe { self.uefi_output_console.assume_init().output(string) }; - self.release_write_lock(); + //self.write_lock.unlock(); if result == EfiStatus::EfiSuccess { - fmt::Result::Ok(()) + Ok(()) } else { - fmt::Result::Err(fmt::Error) + Err(fmt::Error) } } } @@ -91,8 +69,8 @@ macro_rules! print { #[macro_export] macro_rules!
println { - ($fmt:expr) => ($crate::console::print(format_args_nl!($fmt))); - ($fmt:expr, $($arg:tt)*) => ($crate::console::print(format_args_nl!($fmt, $($arg)*))) + ($fmt:expr) => ($crate::console::print(format_args!("{}\n", format_args!($fmt)))); + ($fmt:expr, $($arg:tt)*) => ($crate::console::print(format_args!("{}\n", format_args!($fmt, $($arg)*)))); } #[cfg(debug_assertions)] diff --git a/src/hypervisor_bootloader/src/main.rs b/src/hypervisor_bootloader/src/main.rs index 918de49..b81da6a 100644 --- a/src/hypervisor_bootloader/src/main.rs +++ b/src/hypervisor_bootloader/src/main.rs @@ -7,12 +7,10 @@ #![no_std] #![no_main] -#![feature(asm_sym)] -#![feature(const_maybe_uninit_uninit_array)] -#![feature(format_args_nl)] -#![feature(maybe_uninit_uninit_array)] #![feature(naked_functions)] #![feature(panic_info_message)] +#![feature(let_else)] +#![feature(int_log)] #[macro_use] mod console; @@ -25,15 +23,11 @@ mod serial_port; mod smmu; use common::cpu::*; -use common::{ - HypervisorKernelMainType, MemorySaveListEntry, SystemInformation, ALLOC_SIZE, HYPERVISOR_PATH, - HYPERVISOR_SERIAL_BASE_ADDRESS, HYPERVISOR_VIRTUAL_BASE_ADDRESS, MAX_PHYSICAL_ADDRESS, - MEMORY_SAVE_ADDRESS_ONDEMAND_FLAG, PAGE_MASK, PAGE_SHIFT, PAGE_SIZE, STACK_PAGES, -}; +use common::*; use uefi::{ - boot_service, boot_service::EfiBootServices, file, EfiConfigurationTable, EfiHandle, EfiStatus, - EfiSystemTable, EFI_ACPI_20_TABLE_GUID, EFI_DTB_TABLE_GUID, + boot_service, file, EfiConfigurationTable, EfiHandle, EfiStatus, EfiSystemTable, + EFI_ACPI_20_TABLE_GUID, EFI_DTB_TABLE_GUID, }; use core::arch::asm; @@ -44,47 +38,53 @@ static mut ORIGINAL_VECTOR_BASE: u64 = 0; static mut ORIGINAL_TCR_EL2: u64 = 0; static mut INTERRUPT_FLAG: MaybeUninit<InterruptFlag> = MaybeUninit::uninit(); -static mut MEMORY_POOL: ([MaybeUninit<usize>; ALLOC_SIZE / PAGE_SIZE], usize) = - (MaybeUninit::uninit_array(), 0); - static mut IMAGE_HANDLE: EfiHandle = 0; static mut SYSTEM_TABLE: *const EfiSystemTable = core::ptr::null(); static mut ACPI_20_TABLE_ADDRESS: Option<usize> = None; static mut DTB_ADDRESS: Option<usize> = None; +static mut MEMORY_ALLOCATOR: MaybeUninit<MemoryAllocator> = MaybeUninit::uninit(); #[no_mangle] extern "C" fn efi_main(image_handle: EfiHandle, system_table: *mut EfiSystemTable) { unsafe { - /* Initialize console to use UEFI Output Protocol */ - console::DEFAULT_CONSOLE.init((*system_table).console_output_protocol); IMAGE_HANDLE = image_handle; SYSTEM_TABLE = system_table; + console::DEFAULT_CONSOLE.init((*system_table).console_output_protocol); + } + + if let Some(hash_info) = HYPERVISOR_HASH_INFO { + println!( + "{} Bootloader Version {}({hash_info})", + HYPERVISOR_NAME, + env!("CARGO_PKG_VERSION") + ); + } else { + println!( + "{} Bootloader Version {}", + HYPERVISOR_NAME, + env!("CARGO_PKG_VERSION") + ); } + if let Some(compiler_info) = COMPILER_INFO { + println!("Compiler Information: {compiler_info}"); + } + + assert_eq!(get_current_el() >> 2, 2, "Expected CurrentEL to be EL2"); - println!("Hello,world!"); + let allocated_memory_address = init_memory_pool(); - let efi_boot_services = unsafe { (*system_table).efi_boot_services }; - init_memory_pool(efi_boot_services); #[cfg(debug_assertions)] - dump_memory_map(efi_boot_services); - - let current_el: usize; - unsafe { asm!("mrs {:x}, CurrentEL", out(reg) current_el) }; - let current_el = current_el >> 2; - println!("CurrentEL: {}", current_el); - if current_el != 2 { - panic!("Expected current_el == 2"); - } + dump_memory_map(); - let entry_point = load_hypervisor(image_handle, efi_boot_services); + let entry_point =
load_hypervisor(); #[cfg(debug_assertions)] paging::dump_page_table(); paging::setup_stage_2_translation().expect("Failed to setup Stage2 Paging"); - map_memory_pool(); + map_memory_pool(allocated_memory_address); - detect_acpi_and_dtb(system_table); + detect_acpi_and_dtb(); let mut serial = serial_port::detect_serial_port(); if let Some(s) = &mut serial { @@ -117,22 +117,27 @@ #[cfg(not(feature = "smmu"))] let smmu_v3_base_address = None; - let stack_address = allocate_memory(STACK_PAGES).expect("Failed to alloc stack"); - let memory_save_list = create_memory_save_list(efi_boot_services); + /* Stack for BSP */ + let stack_address = allocate_memory(STACK_PAGES, None).expect("Failed to alloc stack") + + (STACK_PAGES << PAGE_SHIFT); + let memory_save_list = create_memory_save_list(); println!("Call the hypervisor(Entry Point: {:#X})", entry_point); let mut system_info = SystemInformation { acpi_rsdp_address: unsafe { ACPI_20_TABLE_ADDRESS }, vbar_el2: 0, - memory_pool: unsafe { &MEMORY_POOL }, + available_memory_info: unsafe { MEMORY_ALLOCATOR.assume_init_mut().get_all_memory() }, memory_save_list, serial_port: serial, ecam_info, smmu_v3_base_address, - exit_boot_service_address: unsafe { (*efi_boot_services).exit_boot_services } as usize, + exit_boot_service_address: unsafe { + (*(*SYSTEM_TABLE).efi_boot_services).exit_boot_services + } as usize, }; unsafe { (transmute::<usize, HypervisorKernelMainType>(entry_point))(&mut system_info) }; - unsafe { MEMORY_POOL.1 = 0 }; /* Do not call allocate_memory after calling hypervisor */ + + /* Do not call allocate_memory/free_memory from here */ println!("Setup EL1"); @@ -141,24 +146,22 @@ unsafe { INTERRUPT_FLAG.write(local_irq_fiq_save()) }; /* Setup registers */ - unsafe { - asm!("mrs {:x}, vbar_el2", out(reg) ORIGINAL_VECTOR_BASE); - asm!("msr vbar_el2, {:x}", in(reg) system_info.vbar_el2); - } + unsafe { ORIGINAL_VECTOR_BASE = get_vbar_el2() }; + set_vbar_el2(system_info.vbar_el2); + set_up_el1(); /* Jump to EL1(el1_main) */ - el2_to_el1(stack_address + (STACK_PAGES << PAGE_SHIFT)); + el2_to_el1(stack_address, el1_main as *const fn() as usize); /* Never come here */ local_irq_fiq_restore(unsafe { INTERRUPT_FLAG.assume_init_ref().clone() }); panic!("Failed to jump EL1"); } -/// SystemTableを解析し、ACPI 2.0とDTBのアドレスを記録 -/// -/// SystemTableを解析し、[`EFI_ACPI_20_TABLE_GUID`]と[`EFI_DTB_TABLE_GUID`]に記録 +/// Analyze EfiSystemTable and store [`ACPI_20_TABLE_ADDRESS`] and [`DTB_ADDRESS`] fn detect_acpi_and_dtb() { + let system_table = unsafe { SYSTEM_TABLE }; let num_of_entries = unsafe { (*system_table).num_table_entries }; for i in 0..num_of_entries { let table = unsafe { @@ -176,13 +179,21 @@ } } }
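For orientation, the `transmute` above is the bootloader-to-kernel handoff. A hedged sketch of the contract it implies; the real `HypervisorKernelMainType` alias lives in the common crate, so treat this signature as an assumption:

```rust
// Hedged sketch of the entry handoff implied by the transmute above.
// Assumption: HypervisorKernelMainType is a plain fn pointer taking
// &mut SystemInformation (the actual alias is defined in common).
pub type HypervisorKernelMainType = fn(&mut SystemInformation);

fn call_hypervisor_kernel(entry_point: usize, system_info: &mut SystemInformation) {
    let kernel_main: HypervisorKernelMainType = unsafe { core::mem::transmute(entry_point) };
    // The kernel writes its VBAR_EL2 back into system_info, so the
    // bootloader can install the hypervisor's vectors afterwards.
    kernel_main(system_info);
}
```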
-/// UEFIからメモリを確保して[`MEMORY_POOL`]に格納 + +/// Allocate memory and setup [`MEMORY_ALLOCATOR`] +/// +/// This function allocates [`ALLOC_SIZE`] bytes and then sets them into [`MEMORY_ALLOCATOR`]. +/// The attribute of the allocated memory area will be changed to EfiUnusableMemory /// -/// ALLOC_SIZE分をUEFIから確保する。確保したメモリ領域の属性はEfiUnusableMemoryに変更する。 -fn init_memory_pool(b_s: *const EfiBootServices) { +/// # Panics +/// If the allocation fails, this function will panic. +/// +/// # Result +/// Returns the allocated start address +fn init_memory_pool() -> usize { let allocate_pages = ALLOC_SIZE >> PAGE_SHIFT; - let mut allocated_address = boot_service::memory_service::alloc_highest_memory( - b_s, + let allocated_address = boot_service::alloc_highest_memory( + unsafe { (*SYSTEM_TABLE).efi_boot_services }, allocate_pages, MAX_PHYSICAL_ADDRESS, ) @@ -192,24 +203,31 @@ allocated_address, allocated_address + ALLOC_SIZE ); - for e in unsafe { &mut MEMORY_POOL.0 } { - e.write(allocated_address); - allocated_address += PAGE_SIZE; - } - unsafe { MEMORY_POOL.1 = MEMORY_POOL.0.len() }; + //unsafe { MEMORY_ALLOCATOR.write(MemoryAllocator::create(allocated_address, ALLOC_SIZE)) }; + unsafe { + MEMORY_ALLOCATOR + .assume_init_mut() + .init(allocated_address, ALLOC_SIZE) + }; + return allocated_address; } -/// [`MEMORY_POOL`]をTTBR0_EL2にマップし、更にEL1からアクセスできないようにする +/// Map the allocated memory area into TTBR0_EL2 and make it inaccessible from EL1/EL0 +/// +/// This function maps the memory allocated by [`init_memory_pool`] into the new TTBR0_EL2. +/// It also sets up a dummy page: VTTBR_EL2 is configured to redirect EL1/EL0 accesses +/// to the allocated memory area to a single dummy page. +/// Therefore, EL1/EL0 can neither read nor write the allocated memory area. /// -/// [`init_memory_pool`]で確保したメモリ領域をTTBR0_EL2にストレートマップし、ハイパーバイザーから -/// アクセスできるようにする。 +/// # Arguments +/// * `allocated_memory_address` - base address of the memory allocated by [`init_memory_pool`] /// -/// また該当領域をVTTBR_EL2でダミーのページへのアクセスするように設定する。 +/// # Panics +/// If the mapping into the new TTBR0_EL2 or VTTBR_EL2 fails, this function will panic.
+fn map_memory_pool(allocated_memory_address: usize) { paging::map_address( - allocated_memory, - allocated_memory, + allocated_memory_address, + allocated_memory_address, ALLOC_SIZE, true, true, @@ -217,41 +235,61 @@ false, ) .expect("Failed to map allocated memory"); - /*paging::unmap_address_from_vttbr_el2(b_s, allocated_memory, ALLOC_SIZE) + /*paging::unmap_address_from_vttbr_el2(b_s, allocated_memory_address, ALLOC_SIZE) .expect("Failed to unmap allocated address.");*/ - let dummy_page = allocate_memory(1).expect("Failed to alloc dummy page"); + let dummy_page = allocate_memory(1, None).expect("Failed to alloc dummy page"); + paging::map_dummy_page_into_vttbr_el2(allocated_memory_address, ALLOC_SIZE, dummy_page) .expect("Failed to map dummy page"); } -/// メモリをメモリプールから確保 +/// Allocate memory from the memory pool /// -/// メモリを[`pages`]だけメモリプールから確保する。 -/// 失敗した場合はErr(())を返却する。 +/// # Arguments +/// * `pages` - The number of pages to allocate; the allocation size is `pages` << [`PAGE_SHIFT`] +/// * `align` - The alignment of the returned address, given as a shift amount (log2); if `None`, [`PAGE_SHIFT`] will be used +/// +/// # Result +/// If the allocation succeeds, Ok(start_address), otherwise Err +pub fn allocate_memory(pages: usize, align: Option<usize>) -> Result<usize, MemoryAllocationError> { + unsafe { + MEMORY_ALLOCATOR + .assume_init_mut() + .allocate(pages << PAGE_SHIFT, align.unwrap_or(PAGE_SHIFT)) + } +} + +/// Free memory to the memory pool /// /// # Arguments -/// * pages: 確保するメモリページ数 +/// * `address` - The start address to return to the memory pool; it must have been allocated by [`allocate_memory`] +/// * `pages` - The number of allocated pages /// -/// # Return Value -/// 確保に成功した場合はOk(address)、失敗した場合はErr(()) -pub fn allocate_memory(pages: usize) -> Result<usize, ()> { - if unsafe { MEMORY_POOL.1 < pages } { - return Err(()); +/// # Result +/// If it succeeds, Ok(()), otherwise Err +pub fn free_memory(address: usize, pages: usize) -> Result<(), MemoryAllocationError> { + unsafe { + MEMORY_ALLOCATOR + .assume_init_mut() + .free(address, pages << PAGE_SHIFT) } - unsafe { MEMORY_POOL.1 -= pages }; - return Ok(unsafe { MEMORY_POOL.0[MEMORY_POOL.1].assume_init() }); }
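A usage sketch for the two wrappers above. Note that `align` is a shift amount (log2 of the byte alignment), not a byte count, because `MemoryAllocator::allocate` tests `base & ((1 << align) - 1)`. The values below are illustrative:

```rust
// Illustrative: allocate a 16 KiB region (4 pages) aligned to 16 KiB.
fn alloc_example() -> Result<(), MemoryAllocationError> {
    let table = allocate_memory(4, Some(14))?; // 1 << 14 = 16 KiB alignment
    debug_assert_eq!(table & ((1 << 14) - 1), 0);
    /* ... use the region ... */
    // free is currently a no-op in the stack-style allocator, but the call
    // keeps callers correct for the advanced_memory_manager build.
    free_memory(table, 4)
}
```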
-/// ハイパーバイザー本体を[`common::HYPERVISOR_VIRTUAL_BASE_ADDRESS`]にに配置 +/// Load hypervisor_kernel to [`common::HYPERVISOR_VIRTUAL_BASE_ADDRESS`] /// -/// EFIを使用し、[`common::HYPERVISOR_PATH`]よりハイパーバイザー本体を読み込みELFヘッダに従って配置する。 -/// メモリに読み込む前に[`ORIGINAL_PAGE_TABLE`]に元のTTBR0_EL2を保存し、ページテーブルをコピーしたものに -/// 切り替える。読み込みに失敗した場合この関数はpanicする。 +/// This function loads hypervisor_kernel from [`common::HYPERVISOR_PATH`] and places it +/// according to its ELF header. /// -/// # Return Value -/// ハイパーバイザー本体の初期化用エントリポイント -fn load_hypervisor(image_handle: EfiHandle, b_s: *const boot_service::EfiBootServices) -> usize /* Entry Point */ -{ +/// Before loading the hypervisor, this function saves the original TTBR0_EL2 into [`ORIGINAL_PAGE_TABLE`] and +/// creates a new TTBR0_EL2 by copying the original page table tree. +/// +/// # Panics +/// If loading fails (including memory allocation and UEFI calls), this function panics +/// +/// # Result +/// Returns the entry point of hypervisor_kernel +fn load_hypervisor() -> usize { + let image_handle = unsafe { IMAGE_HANDLE }; + let b_s = unsafe { (*SYSTEM_TABLE).efi_boot_services }; let root_protocol = file::open_root_dir(image_handle, b_s).expect("Failed to open the volume"); let mut file_name_utf16: [u16; HYPERVISOR_PATH.len() + 1] = [0; HYPERVISOR_PATH.len() + 1]; @@ -283,9 +321,8 @@ } let program_header_entries_size = elf_header.get_program_header_entry_size() * elf_header.get_num_of_program_header_entries(); - let program_header_pool = - boot_service::memory_service::alloc_pool(b_s, program_header_entries_size) - .expect("Failed to allocate the pool for the program header"); + let program_header_pool = boot_service::alloc_pool(b_s, program_header_entries_size) + .expect("Failed to allocate the pool for the program header"); file::seek(hypervisor_protocol, elf_header.get_program_header_offset()) .expect("Failed to seek for the program header"); let read_size = file::read( @@ -302,7 +339,7 @@ } /* Switch PageTable */ - let cloned_page_table = paging::copy_page_table(); + let cloned_page_table = paging::clone_page_table(); unsafe { ORIGINAL_PAGE_TABLE = get_ttbr0_el2() as usize; ORIGINAL_TCR_EL2 = get_tcr_el2(); @@ -322,7 +359,7 @@ } let pages = (((info.memory_size - 1) & PAGE_MASK) >> PAGE_SHIFT) + 1; let physical_base_address = - allocate_memory(pages).expect("Failed to allocate memory for hypervisor"); + allocate_memory(pages, None).expect("Failed to allocate memory for hypervisor"); if info.file_size > 0 { file::seek(hypervisor_protocol, info.file_offset) @@ -343,10 +380,10 @@ if info.memory_size - info.file_size > 0 { unsafe { - ((*b_s).set_mem)( - physical_base_address + info.file_size, - info.memory_size - info.file_size, + core::ptr::write_bytes( + (physical_base_address + info.file_size) as *mut u8, 0, + info.memory_size - info.file_size, ) }; } @@ -373,7 +410,7 @@ .expect("Failed to map hypervisor"); } } - if let Err(e) = boot_service::memory_service::free_pool(b_s, program_header_pool) { + if let Err(e) = boot_service::free_pool(b_s, program_header_pool) { println!("Failed to free the pool: {:?}", e); } if let Err(e) = file::close_file(hypervisor_protocol) { @@ -386,14 +423,15 @@ return elf_header.get_entry_point(); }
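Both `create_memory_save_list` and `dump_memory_map` below step through the UEFI memory map using `actual_descriptor_size` rather than `size_of::<EfiMemoryDescriptor>()`, because firmware may report a larger descriptor stride. A sketch of the iteration pattern; the `MemoryMapInfo` struct name is an assumption inferred from how `boot_service::get_memory_map` is used here:

```rust
// Sketch: walk the UEFI memory map with the firmware-reported stride.
// `info` is the value returned by boot_service::get_memory_map below;
// its type name is assumed for this sketch.
fn walk_memory_map(info: &MemoryMapInfo) {
    let mut base_address = info.descriptor_address;
    for _ in 0..info.num_of_entries {
        let descriptor = unsafe { &*(base_address as *const boot_service::EfiMemoryDescriptor) };
        let _ = descriptor; // inspect descriptor.memory_type etc. here
        base_address += info.actual_descriptor_size; // not size_of::<EfiMemoryDescriptor>()
    }
}
```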
-fn create_memory_save_list(b_s: *const EfiBootServices) -> &'static mut [MemorySaveListEntry] { +fn create_memory_save_list() -> &'static mut [MemorySaveListEntry] { + let b_s = unsafe { (*SYSTEM_TABLE).efi_boot_services }; const MEMORY_SAVE_LIST_PAGES: usize = 3; const MEMORY_SAVE_LIST_SIZE: usize = MEMORY_SAVE_LIST_PAGES << PAGE_SHIFT; - let memory_map_info = - boot_service::memory_service::get_memory_map(b_s).expect("Failed to get the memory map"); + let memory_map_info = boot_service::get_memory_map(b_s).expect("Failed to get the memory map"); let list = unsafe { core::slice::from_raw_parts_mut( - allocate_memory(MEMORY_SAVE_LIST_PAGES).expect("Failed to allocate Memory for map") + allocate_memory(MEMORY_SAVE_LIST_PAGES, None) + .expect("Failed to allocate memory for memory saving list") as *mut MemorySaveListEntry, MEMORY_SAVE_LIST_SIZE / core::mem::size_of::<MemorySaveListEntry>(), ) @@ -403,9 +441,8 @@ let mut list_pointer = 0usize; for _ in 0..memory_map_info.num_of_entries { - use boot_service::memory_service::EfiMemoryType; - let e = - unsafe { &*(base_address as *const boot_service::memory_service::EfiMemoryDescriptor) }; + use boot_service::EfiMemoryType; + let e = unsafe { &*(base_address as *const boot_service::EfiMemoryDescriptor) }; if e.memory_type == EfiMemoryType::EfiBootServicesData || e.memory_type == EfiMemoryType::EfiRuntimeServicesCode || e.memory_type == EfiMemoryType::EfiRuntimeServicesData @@ -447,24 +484,23 @@ num_of_pages: 0, }; - if let Err(e) = boot_service::memory_service::free_pool(b_s, memory_map_info.descriptor_address) - { + if let Err(e) = boot_service::free_pool(b_s, memory_map_info.descriptor_address) { println!("Failed to free pool for the memory map: {:?}", e); } return list; } #[allow(dead_code)] -fn dump_memory_map(b_s: *const EfiBootServices) { - let memory_map_info = match boot_service::memory_service::get_memory_map(b_s) { +fn dump_memory_map() { + let b_s = unsafe { (*SYSTEM_TABLE).efi_boot_services }; + let memory_map_info = match boot_service::get_memory_map(b_s) { Ok(info) => info, Err(e) => { println!("Failed to get memory_map: {:?}", e); return; } }; - let default_descriptor_size = - core::mem::size_of::<boot_service::memory_service::EfiMemoryDescriptor>(); + let default_descriptor_size = core::mem::size_of::<boot_service::EfiMemoryDescriptor>(); if default_descriptor_size != memory_map_info.actual_descriptor_size { println!( @@ -480,29 +516,22 @@ let mut base_address = memory_map_info.descriptor_address; for index in 0..memory_map_info.num_of_entries { println!("{:02}: {:?}", index, unsafe { - &*(base_address as *const boot_service::memory_service::EfiMemoryDescriptor) + &*(base_address as *const boot_service::EfiMemoryDescriptor) }); base_address += memory_map_info.actual_descriptor_size; } - if let Err(e) = boot_service::memory_service::free_pool(b_s, memory_map_info.descriptor_address) - { + if let Err(e) = boot_service::free_pool(b_s, memory_map_info.descriptor_address) { println!("Failed to free pool for the memory map: {:?}", e); } } -/// EL2での各システムレジスタの値を適宜EL1にコピーし、EL2の各システムレジスタを適切な値に変更 fn set_up_el1() { - let is_e2h_enabled = { - let hcr_el2: u64; - unsafe { asm!("mrs {:x}, hcr_el2", out(reg) hcr_el2) }; - (hcr_el2 & HCR_EL2_E2H) != 0 - }; + let is_e2h_enabled = (get_hcr_el2() & HCR_EL2_E2H) != 0; - /* CNTHCTL_EL2 */ - let cnthctl_el2 = CNTHCTL_EL2_EL1PCEN | CNTHCTL_EL2_EL1PCTEN; - unsafe { asm!("msr cnthctl_el2, {:x}", in(reg) cnthctl_el2) }; - unsafe { asm!("msr cntvoff_el2, xzr") }; + /* CNTHCTL_EL2 & CNTVOFF_EL2 */ + set_cnthctl_el2(CNTHCTL_EL2_EL1PCEN | CNTHCTL_EL2_EL1PCTEN); + set_cntvoff_el2(0); /* HSTR_EL2 */ unsafe { asm!("msr hstr_el2, xzr") }; @@ -522,9 +551,8 @@ /* Ignore it currently...
*/ /* CPACR_EL1 & CPTR_EL2 */ - let cptr_el2_current: u64; + let cptr_el2_current = get_cptr_el2(); let mut cpacr_el1: u64 = 0; - unsafe { asm!("mrs {:x}, cptr_el2",out(reg) cptr_el2_current) }; cpacr_el1 |= ((((cptr_el2_current) & CPTR_EL2_ZEN) >> CPTR_EL2_ZEN_BITS_OFFSET) << CPACR_EL1_ZEN_BITS_OFFSET) @@ -541,17 +569,14 @@ fn set_up_el1() { >> CPTR_EL2_TTA_BIT_OFFSET_WITHOUT_E2H) << CPACR_EL1_TTA_BIT_OFFSET; } - /* TODO: CPTR_EL2を0から必要なBitのみONにするようにする */ + let mut cptr_el2: u64 = cptr_el2_current | CPTR_EL2_ZEN_NO_TRAP | CPTR_EL2_FPEN_NO_TRAP /*| CPTR_EL2_RES1*/; cptr_el2 &= !((1 << 28) | (1 << 30) | (1 << 31)); - unsafe { - asm!("msr cpacr_el1, {:x}",in(reg) cpacr_el1); - asm!("isb") - /* CPTR_EL2 will be set after HCR_EL2 */ - } + set_cpacr_el1(cpacr_el1); + isb(); + /* CPTR_EL2 will be set after HCR_EL2 */ - let id_aa64pfr0_el1: u64; - unsafe { asm!("mrs {:x}, id_aa64pfr0_el1", out(reg) id_aa64pfr0_el1) }; + let id_aa64pfr0_el1 = get_id_aa64pfr0_el1(); if (id_aa64pfr0_el1 & ID_AA64PFR0_EL1_SVE) != 0 { /* ZCR_EL2 */ unsafe { @@ -576,17 +601,14 @@ fn set_up_el1() { } /* MAIR_EL1(Copy MAIR_EL2) */ - unsafe { - asm!(" mrs {t}, mair_el2 - msr mair_el1, {t}", t = out(reg) _ ) - }; + set_mair_el1(get_mair_el2()); /* TTBR0_EL1 */ set_ttbr0_el1(unsafe { ORIGINAL_PAGE_TABLE } as u64); /* TCR_EL1 */ if is_e2h_enabled { - unsafe { asm!("msr tcr_el1, {:x}",in(reg) ORIGINAL_TCR_EL2) }; + set_tcr_el1(unsafe { ORIGINAL_TCR_EL2 }); } else { let mut tcr_el1: u64 = 0; let tcr_el2 = unsafe { ORIGINAL_TCR_EL2 }; @@ -613,19 +635,14 @@ fn set_up_el1() { << TCR_EL1_IPS_BITS_OFFSET; tcr_el1 |= TCR_EL1_EPD1; /* Disable TTBR1_EL1 */ - unsafe { asm!("msr tcr_el1, {:x}", in(reg) tcr_el1) }; + set_tcr_el1(tcr_el1); } /* SCTLR_EL1(Copy SCTLR_EL2) */ - unsafe { - asm!(" mrs {t}, sctlr_el2 - msr sctlr_el1, {t}",t = out(reg) _) - }; + set_sctlr_el1(get_sctlr_el2()); /* VBAR_EL1 */ - unsafe { - asm!("msr vbar_el1, {:x}",in(reg) ORIGINAL_VECTOR_BASE); - } + set_vbar_el1(unsafe { ORIGINAL_VECTOR_BASE }); #[cfg(feature = "a64fx")] { @@ -667,26 +684,17 @@ fn set_up_el1() { } /* HCR_EL2 */ - let hcr_el2: u64 = - HCR_EL2_FIEN | HCR_EL2_API | HCR_EL2_APK | HCR_EL2_RW | HCR_EL2_TSC | HCR_EL2_VM; - unsafe { - asm!("msr hcr_el2, {:x}",in(reg) hcr_el2); - asm!("isb"); - asm!("msr cptr_el2, {:x}",in(reg) cptr_el2); - } + let hcr_el2 = HCR_EL2_FIEN | HCR_EL2_API | HCR_EL2_APK | HCR_EL2_RW | HCR_EL2_TSC | HCR_EL2_VM; + set_hcr_el2(hcr_el2); + isb(); + set_cptr_el2(cptr_el2); } extern "C" fn el1_main() -> ! { local_irq_fiq_restore(unsafe { INTERRUPT_FLAG.assume_init_ref().clone() }); + assert_eq!(get_current_el() >> 2, 1, "Failed to jump to EL1"); println!("Hello,world! from EL1"); - let mut current_el: usize; - unsafe { asm!("mrs {:x}, CurrentEL", out(reg) current_el) }; - let current_el = current_el >> 2; - println!("CurrentEL: {}", current_el); - if current_el != 1 { - panic!("Failed to jump into EL1"); - } println!("Return to UEFI."); unsafe { @@ -701,12 +709,11 @@ extern "C" fn el1_main() -> ! 
{ } #[naked] -extern "C" fn el2_to_el1(stack_pointer: usize) { +extern "C" fn el2_to_el1(stack_pointer: usize, el1_entry_point: usize) { unsafe { asm!( " - adr x8, {} - msr elr_el2, x8 + msr elr_el2, x1 // x1 contains the entry point of EL1 mov x8, sp msr sp_el1, x8 mov sp, x0 // x0 contains stack_pointer @@ -714,9 +721,8 @@ msr spsr_el2, x0 isb eret - ", - sym el1_main, - options(noreturn) + ", + options(noreturn) ) } } diff --git a/src/hypervisor_bootloader/src/paging.rs b/src/hypervisor_bootloader/src/paging.rs index 27d47ed..9f8b6dd 100644 --- a/src/hypervisor_bootloader/src/paging.rs +++ b/src/hypervisor_bootloader/src/paging.rs @@ -9,21 +9,14 @@ //! Paging //! -use super::{ - allocate_memory, TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H, TCR_EL2_DS_WITHOUT_E2H, - TCR_EL2_TG0_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_TG0_WITHOUT_E2H, -}; -use crate::{ - TCR_EL2_PS_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_PS_WITHOUT_E2H, - TCR_EL2_T0SZ_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_T0SZ_WITHOUT_E2H, -}; +use crate::{allocate_memory, free_memory}; use common::cpu::*; use common::paging::*; use common::{PAGE_SHIFT, PAGE_SIZE, STAGE_2_PAGE_SHIFT, STAGE_2_PAGE_SIZE}; -fn _copy_page_table(table_address: usize, current_level: i8) -> usize { - let cloned_table_address = allocate_memory(1).expect("Failed to allocate page table"); +fn _clone_page_table(table_address: usize, current_level: i8) -> usize { + let cloned_table_address = allocate_memory(1, None).expect("Failed to allocate page table"); let cloned_table = unsafe { &mut *(cloned_table_address as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::<u64>()]) @@ -39,23 +32,26 @@ if is_descriptor_table_or_level_3_descriptor(*e) { let next_level_table_address = extract_output_address(*e, PAGE_SHIFT); *e = ((*e) & !(next_level_table_address as u64)) - | (_copy_page_table(next_level_table_address, current_level + 1) as u64); + | (_clone_page_table(next_level_table_address, current_level + 1) as u64); } } return cloned_table_address; } -/// Copy TTBR0_EL2 for Hypervisor +/// Clone TTBR0_EL2 /// -/// ハイパーバイザー向けのページテーブルを複製する。 +/// Clone the page table tree of TTBR0_EL2. /// -/// # Return Value -/// Cloned Page Table Address -pub fn copy_page_table() -> usize { +/// # Panics +/// If memory allocation fails, this function panics +/// +/// # Result +/// Returns the cloned page table address +pub fn clone_page_table() -> usize { let page_table_address = TTBR::new(get_ttbr0_el2()).get_base_address(); let tcr_el2 = get_tcr_el2(); let first_table_level = get_initial_page_table_level_and_bits_to_shift(tcr_el2).0; - return _copy_page_table(page_table_address, first_table_level); + return _clone_page_table(page_table_address, first_table_level); }
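As context for the recursive mappers below: with a 4 KiB granule, every table level resolves 9 bits of the address, so each recursion step selects one of 512 descriptors. A standalone sketch of that index math (function name and values are illustrative):

```rust
// Sketch: 4 KiB-granule descriptor index per table level.
// Level 3 indexes bits [20:12], level 2 bits [29:21], and so on.
const PAGE_SHIFT: usize = 12;

fn table_index(virtual_address: usize, table_level: i8) -> usize {
    let shift = PAGE_SHIFT + 9 * (3 - table_level) as usize;
    (virtual_address >> shift) & 0x1FF // 512 descriptors per table
}

fn main() {
    assert_eq!(table_index(0x4020_3000, 3), 0x003);
    assert_eq!(table_index(0x4020_3000, 2), 0x001);
}
```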
/// Map physical address recursively @@ -63,8 +59,8 @@ /// This will map the memory area up to `num_of_remaining_pages`. /// This will call itself recursively, and map addresses until `num_of_remaining_pages` == 0 or the end of the table is reached. /// When all pages are mapped successfully, `num_of_remaining_pages` will be 0. -/// # Arguments /// +/// # Arguments /// * `physical_address` - The address to map /// * `virtual_address` - The address to associate with `physical_address` /// * `num_of_remaining_pages` - The number of page entries to be mapped, this value will be changed @@ -131,15 +127,19 @@ *physical_address, table_level ); - if is_descriptor_table_or_level_3_descriptor(*target_descriptor) { - pr_debug!( - "PageTable:({:#X}) will be deleted.", - extract_output_address(*target_descriptor, PAGE_SHIFT) - ); - /* TODO: free page table */ + + let old_descriptor = core::mem::replace( + target_descriptor, + *physical_address as u64 + | create_attributes_for_stage_1(permission, memory_attribute, true), + ); + + if is_descriptor_table_or_level_3_descriptor(old_descriptor) { + let old_table = extract_output_address(old_descriptor, PAGE_SHIFT); + pr_debug!("PageTable:({:#X}) will be deleted.", old_table); + let _ = free_memory(old_table, 1); } - let attributes = create_attributes_for_stage_1(permission, memory_attribute, true); - *target_descriptor = *physical_address as u64 | attributes; + *physical_address += 1 << shift_level; *virtual_address += 1 << shift_level; *num_of_remaining_pages -= 512usize.pow((3 - table_level) as u32); @@ -209,6 +209,22 @@ return Ok(()); } +/// Map address +/// +/// This maps the virtual address to the physical address. +/// The virtual address is for EL2. +/// +/// # Arguments +/// * `physical_address` - The address to map +/// * `virtual_address` - The address to associate with `physical_address` +/// * `size` - The map size +/// * `readable` - If true, the memory area will be readable +/// * `writable` - If true, the memory area will be writable +/// * `executable` - If true, the memory area will be executable +/// * `is_device` - If true, the cache control of the memory area will be set for device memory +/// +/// # Result +/// If the mapping succeeds, returns Ok(()), otherwise returns Err(()) pub fn map_address( mut physical_address: usize, mut virtual_address: usize, @@ -285,9 +301,6 @@ return Ok(()); } -/// Map physical Address Recursively into Stage2 translation table -/// -/// permission: Bit0:Readable, Bit1: Writable, Bit2: Executable fn map_address_recursive_stage2( physical_address: &mut usize, virtual_address: &mut usize, @@ -374,17 +387,18 @@ *physical_address, table_level ); - if is_descriptor_table_or_level_3_descriptor(*target_descriptor) { - pr_debug!( - "PageTable:({:#X}) will be deleted.", - extract_output_address(*target_descriptor, STAGE_2_PAGE_SHIFT) - ); - /* TODO: free page table */ - } - let attributes = - create_attributes_for_stage_2(permission, is_dummy_page, is_unmap, true); - *target_descriptor = *physical_address as u64 | attributes; + let old_descriptor = core::mem::replace( + target_descriptor, + *physical_address as u64 + | create_attributes_for_stage_2(permission, is_dummy_page, is_unmap, true), + ); + + if is_descriptor_table_or_level_3_descriptor(old_descriptor) { + let old_table = extract_output_address(old_descriptor, STAGE_2_PAGE_SHIFT); + pr_debug!("PageTable:({:#X}) will be deleted.", old_table); + let _ = free_memory(old_table, 1); + } *physical_address += 1 << shift_level; *virtual_address += 1 << shift_level; @@ -459,8 +473,19 @@ return Ok(()); } +/// Map address ~ (address + size) to the dummy page +/// +/// This function makes `address` ~ (`address` + `size`) inaccessible from EL1/EL0 +/// +/// #
Arguments +/// * `address` - The address to hide from EL1/EL0 +/// * `size` - The size to hide +/// * `dummy_page` - [`common::PAGE_SIZE`] memory area to convert the access from EL1/EL0 +/// +/// # Result +/// If mapping is succeeded, returns Ok(()), otherwise returns Err(()) pub fn map_dummy_page_into_vttbr_el2( - mut virtual_address: usize, + mut address: usize, size: usize, mut dummy_page: usize, /*4 KiB Page Physical Address*/ ) -> Result<(), ()> { @@ -483,7 +508,7 @@ pub fn map_dummy_page_into_vttbr_el2( let original_dummy_page = dummy_page; map_address_recursive_stage2( &mut dummy_page, - &mut virtual_address, + &mut address, &mut num_of_needed_pages, TTBR::new(get_vttbr_el2()).get_base_address(), initial_look_up_level, @@ -807,29 +832,17 @@ fn allocate_page_table_for_stage_1( t0sz: u8, is_for_ttbr: bool, ) -> Result { - let table_address_alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) as u8 * (4 - look_up_level) as u8) - t0sz).max(4)).min(12) + let alignment = if is_for_ttbr { + ((64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as usize).max(4)).min(12) } else { - PAGE_SHIFT as u8 + PAGE_SHIFT }; - loop { - match allocate_memory(1) { - Ok(address) => { - if (address & ((1 << table_address_alignment) - 1)) != 0 { - println!( - "The table address is not alignment with {}, {:#X} will be wasted.", - table_address_alignment, address - ); - /* TODO: アライメントを指定してメモリを確保できるようにし、無駄をなくす。 */ - } else { - return Ok(address); - } - } - Err(e) => { - println!("Failed to allocate memory for the paging table: {:?}", e); - return Err(e); - } - }; + match allocate_memory(1, Some(alignment)) { + Ok(address) => Ok(address), + Err(err) => { + println!("Failed to allocate the page table: {:?}", err); + Err(()) + } } } @@ -842,33 +855,19 @@ fn allocate_page_table_for_stage_2( number_of_tables: u8, ) -> Result { assert_ne!(number_of_tables, 0); - let table_address_alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) as u8 * (4 - look_up_level) as u8) - t0sz).max(4)).min(12) - + (number_of_tables - 1) + let alignment = if is_for_ttbr { + ((64 - ((PAGE_SHIFT - 3) as usize * (4 - look_up_level) as usize) - t0sz as usize).max(4)) + .min(12) + + (number_of_tables as usize - 1) } else { assert_eq!(number_of_tables, 1); - STAGE_2_PAGE_SHIFT as u8 + STAGE_2_PAGE_SHIFT }; - loop { - match allocate_memory(number_of_tables as usize) { - Ok(address) => { - if (address & ((1 << table_address_alignment) - 1)) != 0 { - println!( - "The table address is not alignment with {}, {:#X} will be wasted.", - table_address_alignment, address - ); - /* TODO: アライメントを指定してメモリを確保できるようにし、無駄をなくす。 */ - if number_of_tables != 1 { - let _ = allocate_memory(1); - } - } else { - return Ok(address); - } - } - Err(e) => { - println!("Failed to allocate memory for the paging table: {:?}", e); - return Err(()); - } - }; + match allocate_memory(number_of_tables as usize, Some(alignment)) { + Ok(address) => Ok(address), + Err(err) => { + println!("Failed to allocate the page table: {:?}", err); + Err(()) + } } } diff --git a/src/hypervisor_bootloader/src/smmu.rs b/src/hypervisor_bootloader/src/smmu.rs index a7b2fb8..6ab851b 100644 --- a/src/hypervisor_bootloader/src/smmu.rs +++ b/src/hypervisor_bootloader/src/smmu.rs @@ -1,188 +1,194 @@ // Copyright (c) 2022 RIKEN +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) // All rights reserved. // // This software is released under the MIT License. // http://opensource.org/licenses/mit-license.php //! -//! 
System Memory Management Unit +//! System Memory Management Unit Initialization +//! +//! Supported SMMU: SMMUv3.0 ~ SMMUv3.3 //! use crate::allocate_memory; use crate::paging::map_address; -use common::acpi::{get_acpi_table, iort::IORT, AcpiError}; -use common::cpu::{ - get_vtcr_el2, get_vttbr_el2, VTCR_EL2_PS, VTCR_EL2_PS_BITS_OFFSET, VTCR_EL2_SL0, - VTCR_EL2_SL0_BITS_OFFSET, VTCR_EL2_T0SZ, VTCR_EL2_T0SZ_BITS_OFFSET, -}; -use common::paging::Shareability; -use common::smmu::{ - StreamTableEntry, SMMU_CR0, SMMU_CR0ACK, SMMU_CR0_SMMUEN, SMMU_CR1, - SMMU_CR1_TABLE_SH_BITS_OFFSET, SMMU_GBPA, SMMU_GBPA_SHCFG_BITS_OFFSET, SMMU_IDR0, - SMMU_IDR0_S2P, SMMU_IDR0_ST_LEVEL, SMMU_IDR0_TTENDIAN, SMMU_IDR0_TTENDIAN_BITS_OFFSET, - SMMU_IDR1, SMMU_IDR5, SMMU_IDR5_GRAN4K, SMMU_MEMORY_MAP_SIZE, SMMU_STRTAB_BASE, - SMMU_STRTAB_BASE_CFG, -}; -use common::{bitmask, PAGE_SIZE, STAGE_2_PAGE_SIZE}; - -/// SMMUの初期化および設定を行います +use common::cpu::{get_vtcr_el2, get_vttbr_el2}; +use common::{acpi, paging::page_align_up, smmu::*, PAGE_SHIFT}; + +use core::ptr::{read_volatile, write_volatile}; + +/// Initialize SMMUv3 and set up a Stage 2 only STE /// -/// 渡されたACPIテーブルの中からIORTを捜索し、その中からSMMUv3のベースアドレスを発見した場合に -/// 以下の初期化を行います。 +/// This function searches for the SMMUv3 base address in the ACPI IORT and sets the SMMU up. /// -/// 1. SMMU領域をマップ(マップするサイズ: [`SMMU_MEMORY_MAP_SIZE`]) -/// 2. SMMUがStage2ページングと2段階Stream Tableをサポートし利用可能か確認 -/// 3. CPUのStage2ページングの設定をコピーしSTEを作成する -/// 4. 作成したSTEを複製した2段目のStream Tableを一つ作成 -/// 5. 1段目のStream Tableの各Descriptorに作成した2段目のStream Tableのアドレスを設定 -/// 6. IORTに存在するStream IDの全てをマップできているか確認 -/// 7. SMMUの有効化 +/// # Setup Processes +/// 1. Map the SMMUv3 register map (size: [`SMMU_MEMORY_MAP_SIZE`]) +/// 2. Check if the SMMU supports Stage 2 paging and a 2-level Stream Table +/// 3. Create an STE from the CPU's VTTBR_EL2 and VTCR_EL2 +/// 4. Build a Level 2 Stream Table by cloning the created STE (span: STREAM_TABLE_SPLIT) +/// 5. Find the maximum stream ID by parsing the IORT +/// 6. Build the Level 1 Stream Table based on the maximum stream ID and set the same L2Ptr and Span in all entries +/// 7.
Enable SMMU /// /// # Arguments -/// acpi_address: ACPI 2.0以降のRSDPのアドレス +/// * acpi_address: RSDP of ACPI 2.0 or later /// -/// # Return Value -/// 上記すべての初期化に成功した場合にSome(smmuのベースアドレス)、そうでなければNone +/// # Result +/// If the initialization succeeds, returns Some(smmuv3_base_address), otherwise None pub fn detect_smmu(acpi_address: usize) -> Option<usize> { - match get_acpi_table(acpi_address, &IORT::SIGNATURE) { - Ok(address) => { - let iort = unsafe { &*(address as *const IORT) }; - if let Some(smmu_v3) = iort.get_smmu_v3_information() { - let base_address = smmu_v3.base_address as usize; - println!("SMMU Base Address: {:#X}", base_address); - - map_address( - base_address, - base_address, - SMMU_MEMORY_MAP_SIZE, - true, - true, - false, - true, - ) - .expect("Failed to map SMMU Memory Area"); - let smmu_idr0 = unsafe { *((base_address + SMMU_IDR0) as *const u32) }; - let s2p = (smmu_idr0 & SMMU_IDR0_S2P) != 0; - let is_supported_2level_stream_table = (smmu_idr0 & SMMU_IDR0_ST_LEVEL) != 0; - println!( - "SMMU_IDR0: {:#X}(2Level: {}, S2P: {})", - smmu_idr0, is_supported_2level_stream_table, s2p - ); - if ((smmu_idr0 & SMMU_IDR0_TTENDIAN) >> SMMU_IDR0_TTENDIAN_BITS_OFFSET) == 0b11 { - println!("Big Endian is not supported."); - return None; - } else if !s2p { - println!("Stage 2 paging is not supported."); - return None; - } else if !is_supported_2level_stream_table { - println!("2Level stream table is not supported."); - return None; - } - let smmu_idr5 = unsafe { *((base_address + SMMU_IDR5) as *const u32) }; - if (smmu_idr5 & SMMU_IDR5_GRAN4K) == 0 { - println!("4K Paging is not supported."); - return None; - } - let smmu_cr0 = unsafe { *((base_address + SMMU_CR0) as *const u32) }; - if (smmu_cr0 & SMMU_CR0_SMMUEN) != 0 { - println!("SMMU is already enabled."); - return None; - } - let mut smmu_cr1 = unsafe { *((base_address + SMMU_CR1) as *const u32) }; - smmu_cr1 |= (0b11) << SMMU_CR1_TABLE_SH_BITS_OFFSET; - unsafe { *((base_address + SMMU_CR1) as *mut u32) = smmu_cr1 }; - - let smmu_gbpa = 0b01 << SMMU_GBPA_SHCFG_BITS_OFFSET; - unsafe { *((base_address + SMMU_GBPA) as *mut u32) = smmu_gbpa }; - - /* Create STE */ - let mut ste = StreamTableEntry::new(); - ste.set_config(true, false); - ste.set_s2ir0(false, true); - ste.set_s2or0(false, true); - ste.set_s2sh0(Shareability::OuterShareable); - ste.set_s2tg(STAGE_2_PAGE_SIZE); - let vtcr_el2 = get_vtcr_el2(); - ste.set_s2t0sz(((vtcr_el2 & VTCR_EL2_T0SZ) >> VTCR_EL2_T0SZ_BITS_OFFSET) as u32); - ste.set_s2sl0(((vtcr_el2 & VTCR_EL2_SL0) >> VTCR_EL2_SL0_BITS_OFFSET) as u32); - ste.set_s2ps(((vtcr_el2 & VTCR_EL2_PS) >> VTCR_EL2_PS_BITS_OFFSET) as u8); - let vttbr_el2 = get_vttbr_el2(); - ste.set_stage2_translation_table(vttbr_el2 as usize); - ste.validate(); - - let level2_table_address = - allocate_memory(1).expect("Failed to allocate a table for SMMU"); - let level1_table_address = - allocate_memory(1).expect("Failed to allocate a table for SMMU"); - assert_eq!(core::mem::size_of::<StreamTableEntry>(), 64); - let level2_table = unsafe { - &mut *(level2_table_address - as *mut [StreamTableEntry; - PAGE_SIZE / core::mem::size_of::<StreamTableEntry>()]) - }; - for e in level2_table { - *e = ste.clone(); - } - for e in unsafe { &mut *(level1_table_address as *mut [u64; PAGE_SIZE / 8]) } { - *e = level2_table_address as u64 | 7; /* Level2 Table contains 2^(7- 1) (= 64) STEs */ - } - const MAX_STREAM_ID: u32 = (64 * (PAGE_SIZE / 8) - 1) as u32; - const TABLE_LOG2_SIZE: u32 = (MAX_STREAM_ID + 1).trailing_zeros(); - - for e in smmu_v3.get_array_of_id_mappings() { - if e.is_single_map() { -
println!("Single Map StreamID: {:#X}", e.output_base); - if e.output_base > MAX_STREAM_ID { - panic!("Unsupported StreamID: {:X}", e.output_base); - } - } else { - let max_stream_id = e.output_base + e.number_of_ids - 1; - println!("StreamID: {:#X}~{:#X}", e.output_base, max_stream_id); - if max_stream_id > MAX_STREAM_ID { - panic!("Unsupported StreamID: {:X}", max_stream_id); - } - } - } - let smmu_idr1 = unsafe { *((base_address + SMMU_IDR1) as *const u32) }; - let stream_id_size = smmu_idr1 & bitmask!(5, 0); - let number_of_stream_ids = if (1 << stream_id_size) - 1 < MAX_STREAM_ID { - stream_id_size - } else { - TABLE_LOG2_SIZE - }; - println!( - "Number of Stream Ids: 2^{:#X} - 1({:#X})", - number_of_stream_ids, - 2u32.pow(number_of_stream_ids) - 1 - ); - let strtab_base_cfg = (1 << 16) | (6 << 6) | number_of_stream_ids; - unsafe { *((base_address + SMMU_STRTAB_BASE_CFG) as *mut u32) = strtab_base_cfg }; - unsafe { - *((base_address + SMMU_STRTAB_BASE) as *mut u64) = - (level1_table_address as u64) & bitmask!(51, 6) - }; - /* Enable SMMU */ - let smmu_cr0 = SMMU_CR0_SMMUEN; - unsafe { *((base_address + SMMU_CR0) as *mut u32) = smmu_cr0 }; - - let smmu_cr0ack = unsafe { *((base_address + SMMU_CR0ACK) as *const u32) }; - if (smmu_cr0ack & SMMU_CR0_SMMUEN) == 0 { - panic!("Failed to enable SMMU(SMMU_CR0ACK: {:#X})", smmu_cr0ack); - } - Some(base_address) - } else { - println!("SMMUv3 is not found"); - None - } - } - Err(AcpiError::TableNotFound) => { + let iort = match acpi::get_acpi_table(acpi_address, &acpi::iort::IORT::SIGNATURE) { + Ok(address) => unsafe { &*(address as *const acpi::iort::IORT) }, + Err(acpi::AcpiError::TableNotFound) => { println!("IORT is not found."); - None + return None; } Err(e) => { - println!("Failed to get IORT table: {:?}", e); - None + println!("Failed to parse ACPI table: {:?}", e); + return None; + } + }; + let Some(smmu_v3) = iort.get_smmu_v3_information() else { println!("SMMUv3 is not found"); return None;}; + let base_address = smmu_v3.base_address as usize; + println!("SMMUv3 BaseAddress: {:#X}", base_address); + + map_address( + base_address, + base_address, + SMMU_MEMORY_MAP_SIZE, + true, + true, + false, + true, + ) + .expect("Failed to map SMMU Memory Area"); + + /* Check SMMU functions */ + let smmu_idr0 = unsafe { read_volatile((base_address + SMMU_IDR0) as *const u32) }; + let s2p = (smmu_idr0 & SMMU_IDR0_S2P) != 0; + let is_supported_2level_stream_table = (smmu_idr0 & SMMU_IDR0_ST_LEVEL) != 0; + println!( + "SMMU_IDR0: {:#X}(2Level: {}, S2P: {})", + smmu_idr0, is_supported_2level_stream_table, s2p + ); + if ((smmu_idr0 & SMMU_IDR0_TTENDIAN) >> SMMU_IDR0_TTENDIAN_BITS_OFFSET) == 0b11 { + println!("SMMU does not support Little Endian."); + return None; + } else if !s2p { + println!("Stage 2 paging is not supported."); + return None; + } else if !is_supported_2level_stream_table { + println!("2Level stream table is not supported."); + return None; + } + let smmu_idr5 = unsafe { read_volatile((base_address + SMMU_IDR5) as *const u32) }; + if (smmu_idr5 & SMMU_IDR5_GRAN4K) == 0 { + println!("4K Paging is not supported."); + return None; + } + let smmu_cr0 = unsafe { read_volatile((base_address + SMMU_CR0) as *const u32) }; + if (smmu_cr0 & SMMU_CR0_SMMUEN) != 0 { + println!("SMMU is already enabled."); + return None; + } + + let mut smmu_cr1 = unsafe { read_volatile((base_address + SMMU_CR1) as *const u32) }; + smmu_cr1 |= (0b11) << SMMU_CR1_TABLE_SH_BITS_OFFSET; + unsafe { write_volatile((base_address + SMMU_CR1) as *mut u32, smmu_cr1) }; + + /* 
Create STE */ + let mut ste = StreamTableEntry::new(); + ste.set_stage2_settings(get_vtcr_el2(), get_vttbr_el2(), true, true); + ste.validate(); + + /* Create Stream Table (Level2)*/ + const STREAM_TABLE_SPLIT: u32 = 6; + + let level2_table_address = allocate_memory( + page_align_up((1 << STREAM_TABLE_SPLIT) * core::mem::size_of::<StreamTableEntry>()) + >> PAGE_SHIFT, + None, + ) + .expect("Failed to allocate memory for Level2 Stream Table"); + let level2_table = unsafe { + &mut *(level2_table_address as *mut [StreamTableEntry; (1 << STREAM_TABLE_SPLIT)]) + }; + for e in level2_table { + core::mem::forget(core::mem::replace(e, ste.clone())); + } + + /* Find max_stream_id */ + let mut max_stream_id: u32 = 0; + for e in smmu_v3.get_array_of_id_mappings() { + if e.is_single_map() { + println!("Single Map StreamID: {:#X}", e.output_base); + if e.output_base > max_stream_id { + max_stream_id = e.output_base; + } + } else { + let array_max_stream_id = e.output_base + e.number_of_ids - 1; + println!("StreamID: {:#X}~{:#X}", e.output_base, array_max_stream_id); + if array_max_stream_id > max_stream_id { + max_stream_id = array_max_stream_id; + } } } + + /* Create Stream Table (Level1)*/ + let number_of_level1_context_descriptors = (max_stream_id + 1) >> STREAM_TABLE_SPLIT; + let level1_table_address = allocate_memory( + page_align_up(number_of_level1_context_descriptors as usize * core::mem::size_of::<u64>()) + >> PAGE_SHIFT, + None, + ) + .expect("Failed to allocate memory for Level1 Stream Table"); + + for e in unsafe { + core::slice::from_raw_parts_mut( + level1_table_address as *mut u64, + number_of_level1_context_descriptors as usize, + ) + } { + *e = level2_table_address as u64 | (STREAM_TABLE_SPLIT as u64 + 1); + } + + println!( + "Level1 Table Entries: {:#X}, Max Stream Id: {:#X}, LOG2SIZE: {}", + number_of_level1_context_descriptors, + max_stream_id, + (max_stream_id + 1).log2() + ); + + unsafe { + write_volatile( + (base_address + SMMU_STRTAB_BASE_CFG) as *mut u32, + SMMU_STRTAB_BASE_CFG_FMT_2LEVEL + | (STREAM_TABLE_SPLIT << SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET) + | (max_stream_id + 1).log2(), + ) + }; + unsafe { + write_volatile( + (base_address + SMMU_STRTAB_BASE) as *mut u64, + (level1_table_address as u64) & SMMU_STRTAB_BASE_ADDRESS, + ) + }; + + /* Enable SMMU */ + unsafe { write_volatile((base_address + SMMU_CR0) as *mut u32, SMMU_CR0_SMMUEN) }; + + while unsafe { read_volatile((base_address + SMMU_CR0ACK) as *const u32) & SMMU_CR0_SMMUEN } + == 0 + { + core::hint::spin_loop(); + } + + unsafe { + write_volatile( + (base_address + SMMU_GBPA) as *mut u32, + SMMU_GBPA_UPDATE | SMMU_GBPA_SHCFG_INCOMING, + ) + }; + Some(base_address) }
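The geometry programmed above can be cross-checked against the removed single-level code: an L1 descriptor's Span field means the level-2 table holds 2^(Span - 1) STEs, so SPLIT = 6 gives Span = 7, and STRTAB_BASE_CFG packs FMT, SPLIT, and LOG2SIZE. A self-contained sketch with the field layout taken from the old inline constants ((1 << 16) | (split << 6) | log2size); names mirror common::smmu, but the values here are assumptions for illustration:

```rust
// Sketch of the 2-level stream table geometry used by detect_smmu above.
const STREAM_TABLE_SPLIT: u32 = 6; // 2^6 = 64 STEs per level-2 table
const FMT_2LEVEL: u32 = 1 << 16; // field layout per the old inline code
const SPLIT_BITS_OFFSET: u32 = 6;

// L1STD = level-2 table pointer | Span, where Span - 1 = log2(STEs per L2 table).
fn l1_descriptor(level2_table_address: u64) -> u64 {
    level2_table_address | (STREAM_TABLE_SPLIT as u64 + 1)
}

fn strtab_base_cfg(max_stream_id: u32) -> u32 {
    FMT_2LEVEL | (STREAM_TABLE_SPLIT << SPLIT_BITS_OFFSET) | (max_stream_id + 1).ilog2()
}

fn main() {
    // 0x800 stream IDs -> LOG2SIZE = 11 and 0x20 level-1 descriptors.
    assert_eq!(strtab_base_cfg(0x7FF) & 0x3F, 11);
    assert_eq!((0x7FF_u32 + 1) >> STREAM_TABLE_SPLIT, 0x20);
    assert_eq!(l1_descriptor(0x1000), 0x1007); // Span = 7 for 64 STEs
}
```

(`ilog2` is the stabilized name of the `int_log` feature's `log2` used in the patch.)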
diff --git a/src/hypervisor_kernel/Cargo.toml b/src/hypervisor_kernel/Cargo.toml index 03caa1f..ec2ee0c 100644 --- a/src/hypervisor_kernel/Cargo.toml +++ b/src/hypervisor_kernel/Cargo.toml @@ -7,11 +7,11 @@ [package] name = "hypervisor_kernel" -version = "0.4.0" +version = "1.0.0" edition = "2021" [features] -default = ["smmu", "i210", "mt27800", "fast_restore", "acpi_table_protection", "contiguous_bit"] +default = ["smmu", "i210", "mt27800", "fast_restore", "acpi_table_protection", "contiguous_bit", "advanced_memory_manager"] smmu = [] i210 = [] mt27800 = [] @@ -19,6 +19,7 @@ fast_restore = [] acpi_table_protection = [] contiguous_bit = [] a64fx = [] +advanced_memory_manager = ["common/advanced_memory_manager"] [dependencies] common = { path = "../common" } diff --git a/src/hypervisor_kernel/src/acpi_protect.rs b/src/hypervisor_kernel/src/acpi_protect.rs index 8997fbc..f1087ee 100644 --- a/src/hypervisor_kernel/src/acpi_protect.rs +++ b/src/hypervisor_kernel/src/acpi_protect.rs @@ -13,17 +13,17 @@ use crate::paging::add_memory_access_trap; use crate::StoredRegisters; use common::acpi::{RSDP, XSDT, XSDT_STRUCT_SIZE}; -use common::{acpi, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; +use common::{STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; -const EXCEPT_TABLE: [&[u8; 4]; 1] = [&acpi::iort::IORT::SIGNATURE]; +const EXCEPT_TABLE: [&[u8; 4]; 0] = []; pub fn init_table_protection(rsdp_address: usize) { /* Assume table validation check is done */ register_acpi_table( rsdp_address, - Some(unsafe { (&*(rsdp_address as *const RSDP)).length }), + Some(unsafe { (*(rsdp_address as *const RSDP)).length }), ); - let xsdt = unsafe { &*((&*(rsdp_address as *const RSDP)).xsdt_address as *const XSDT) }; + let xsdt = unsafe { &*((*(rsdp_address as *const RSDP)).xsdt_address as *const XSDT) }; register_acpi_table(xsdt as *const _ as usize, None); let mut is_dsdt_processed = false; @@ -94,5 +94,5 @@ _access_size: u8, _data: u64, ) -> Result<StoreHookResult, ()> { - return Ok(StoreHookResult::Cancel); + Ok(StoreHookResult::Cancel) } diff --git a/src/hypervisor_kernel/src/emulation.rs b/src/hypervisor_kernel/src/emulation.rs index 4bbd010..0eeca70 100644 --- a/src/hypervisor_kernel/src/emulation.rs +++ b/src/hypervisor_kernel/src/emulation.rs @@ -12,19 +12,22 @@ mod load; mod store; -use crate::{handler_panic, StoredRegisters}; +pub use load::read_memory; +pub use store::write_memory; + +use crate::{handler_panic, paging::map_address, StoredRegisters}; -use common::bitmask; use common::cpu::{ + convert_virtual_address_to_intermediate_physical_address_el0_read, convert_virtual_address_to_intermediate_physical_address_el1_read, convert_virtual_address_to_intermediate_physical_address_el1_write, convert_virtual_address_to_physical_address_el2_read, + convert_virtual_address_to_physical_address_el2_write, SPSR_EL2_M, SPSR_EL2_M_EL0T, }; +use common::{bitmask, PAGE_MASK, PAGE_SIZE}; use core::arch::asm; -const NORMAL_INSTRUCTION_SIZE: usize = 4; - const REGISTER_NUMBER_XZR: u8 = 31; #[allow(unused_variables)] @@ -34,6 +37,7 @@ pub fn data_abort_handler( elr: u64, far: u64, hpfar: u64, + spsr: u64, ) -> Result<(), ()> { #[cfg(debug_assertions)] if (esr & (1 << 24)) != 0 { @@ -67,25 +71,23 @@ println!("No Valid Instruction Syndrome Information."); } - /* TODO: check EL1 or EL0 */ - let instruction_intermediate_physical_address = - convert_virtual_address_to_intermediate_physical_address_el1_read(elr as usize).unwrap(); + let instruction_intermediate_physical_address = if (spsr & SPSR_EL2_M) == SPSR_EL2_M_EL0T { + pr_debug!("Access from EL0"); + convert_virtual_address_to_intermediate_physical_address_el0_read(elr as usize).unwrap() + } else { + convert_virtual_address_to_intermediate_physical_address_el1_read(elr as usize).unwrap() + }; pr_debug!( "Target Instruction Address: {:#X} => {:#X}", elr, instruction_intermediate_physical_address ); - assert_eq!( - convert_virtual_address_to_physical_address_el2_read( - instruction_intermediate_physical_address - ) - .unwrap_or(0), - instruction_intermediate_physical_address - ); - let target_instruction = unsafe { *(instruction_intermediate_physical_address as *const u32) }; + let target_instruction_virtual_address = + get_virtual_address_to_access_ipa(instruction_intermediate_physical_address, false)?; + let target_instruction = unsafe { *(target_instruction_virtual_address as *const u32) };
pr_debug!("Target Instruction: {:#X}", target_instruction); - return emulate_instruction(s_r, target_instruction, elr, far, hpfar); + emulate_instruction(s_r, target_instruction, elr, far, hpfar) } fn emulate_instruction( @@ -211,7 +213,7 @@ } } println!("Unknown Instruction: {:#X}", target_instruction); - return Err(()); + Err(()) } fn faulting_va_to_ipa_load(far: u64) -> Result<usize, ()> { convert_virtual_address_to_intermediate_physical_address_el1_read(far as usize) } @@ -222,14 +224,42 @@ fn faulting_va_to_ipa_store(far: u64) -> Result<usize, ()> { convert_virtual_address_to_intermediate_physical_address_el1_write(far as usize) } -fn advance_elr_el2() { - unsafe { - asm!(" - mrs {t}, elr_el2 - add {t}, {t}, {SIZE} - msr elr_el2, {t} - ", t = out(reg) _ ,SIZE = const NORMAL_INSTRUCTION_SIZE) - }; +fn get_virtual_address_to_access_ipa( + intermediate_physical_address: usize, + is_write_access: bool, +) -> Result<usize, ()> { + if is_write_access { + if let Ok(pa) = + convert_virtual_address_to_physical_address_el2_write(intermediate_physical_address) + { + return if pa != intermediate_physical_address { + println!("IPA({:#X}) != VA({:#X})", intermediate_physical_address, pa); + Err(()) + } else { + Ok(intermediate_physical_address) + }; + } + } else if let Ok(pa) = + convert_virtual_address_to_physical_address_el2_read(intermediate_physical_address) + { + return if pa != intermediate_physical_address { + println!("IPA({:#X}) != VA({:#X})", intermediate_physical_address, pa); + Err(()) + } else { + Ok(intermediate_physical_address) + }; + } + println!("Map {:#X}", intermediate_physical_address); + map_address( + intermediate_physical_address & PAGE_MASK, + intermediate_physical_address & PAGE_MASK, + PAGE_SIZE, + true, + true, + false, + true, + )?; + Ok(intermediate_physical_address) } fn get_register_reference_mut(s_r: &mut StoredRegisters, index: u8) -> &mut u64 { diff --git a/src/hypervisor_kernel/src/emulation/load.rs b/src/hypervisor_kernel/src/emulation/load.rs index 8923fa2..05eb489 100644 --- a/src/hypervisor_kernel/src/emulation/load.rs +++ b/src/hypervisor_kernel/src/emulation/load.rs @@ -12,14 +12,14 @@ //! 
use super::{ - advance_elr_el2, faulting_va_to_ipa_load, get_register_reference_mut, + faulting_va_to_ipa_load, get_register_reference_mut, get_virtual_address_to_access_ipa, write_back_index_register_imm7, write_back_index_register_imm9, REGISTER_NUMBER_XZR, }; + use crate::memory_hook::{memory_load_hook_handler, LoadHookResult}; use crate::StoredRegisters; -use common::cpu::convert_virtual_address_to_physical_address_el2_read; -use common::{bitmask, STAGE_2_PAGE_SHIFT}; +use common::{bitmask, cpu::advance_elr_el2, STAGE_2_PAGE_SHIFT}; pub fn emulate_load_register( s_r: &mut StoredRegisters, @@ -259,37 +259,22 @@ fn load_from_address_and_store_into_register( intermediate_physical_load_address, sse ); - /* TODO: 物理アドレスへのアクセス関数を用意する。現方法だとVA!=PAの時に誤動作する */ - let physical_load_address = - convert_virtual_address_to_physical_address_el2_read(intermediate_physical_load_address) - .expect("Failed to convert IPA => PA"); + let virtual_address_to_load = + get_virtual_address_to_access_ipa(intermediate_physical_load_address, false)?; - pr_debug!( - "{}{} <= [{:#X}](PA)(Sign Extend: {})", - if sf { 'X' } else { 'W' }, - target_register, - physical_load_address, - sse - ); if !sf && size == 0b11 { println!("Invalid Instruction: Loading a 64bit data into the 32bit register."); return Err(()); } - let hook_result = memory_load_hook_handler(physical_load_address, s_r, size, sf, sse)?; + let hook_result = + memory_load_hook_handler(intermediate_physical_load_address, s_r, size, sf, sse)?; let data = match hook_result { LoadHookResult::PassThrough => { if sse { unimplemented!(); } else { - use core::ptr::read_volatile; - match size { - 0b00 => unsafe { read_volatile(physical_load_address as *const u8) as u64 }, - 0b01 => unsafe { read_volatile(physical_load_address as *const u16) as u64 }, - 0b10 => unsafe { read_volatile(physical_load_address as *const u32) as u64 }, - 0b11 => unsafe { read_volatile(physical_load_address as *const u64) }, - _ => unreachable!(), - } + _read_memory(virtual_address_to_load, size) } } LoadHookResult::Data(d) => d, @@ -301,3 +286,22 @@ fn load_from_address_and_store_into_register( } return Ok(()); } + +fn _read_memory(load_virtual_address: usize, access_size: u8) -> u64 { + use core::ptr::read_volatile; + match access_size { + 0b00 => unsafe { read_volatile(load_virtual_address as *const u8) as u64 }, + 0b01 => unsafe { read_volatile(load_virtual_address as *const u16) as u64 }, + 0b10 => unsafe { read_volatile(load_virtual_address as *const u32) as u64 }, + 0b11 => unsafe { read_volatile(load_virtual_address as *const u64) }, + _ => unreachable!(), + } +} + +pub fn read_memory(intermediate_physical_load_address: usize, access_size: u8) -> u64 { + _read_memory( + get_virtual_address_to_access_ipa(intermediate_physical_load_address, false) + .expect("Failed to convert Address"), + access_size, + ) +} diff --git a/src/hypervisor_kernel/src/emulation/store.rs b/src/hypervisor_kernel/src/emulation/store.rs index 6a20a80..5ddf2ac 100644 --- a/src/hypervisor_kernel/src/emulation/store.rs +++ b/src/hypervisor_kernel/src/emulation/store.rs @@ -12,14 +12,14 @@ //! 
use super::{ - advance_elr_el2, faulting_va_to_ipa_store, get_register_reference_mut, + faulting_va_to_ipa_store, get_register_reference_mut, get_virtual_address_to_access_ipa, write_back_index_register_imm7, write_back_index_register_imm9, REGISTER_NUMBER_XZR, }; + use crate::memory_hook::{memory_store_hook_handler, StoreHookResult}; use crate::{handler_panic, StoredRegisters}; -use common::cpu::{convert_virtual_address_to_physical_address_el2_write, invalidate_data_cache}; -use common::{bitmask, STAGE_2_PAGE_SHIFT}; +use common::{bitmask, cpu::advance_elr_el2, STAGE_2_PAGE_SHIFT}; pub fn emulate_store_register( s_r: &mut StoredRegisters, @@ -185,14 +185,12 @@ fn store_register_into_address( target_register: u8, size: u8, ) -> Result<(), ()> { - /* TODO: 物理アドレスへのアクセス関数を用意する。現方法だとVA!=PAの時に誤動作する */ - let physical_store_address = - convert_virtual_address_to_physical_address_el2_write(intermediate_physical_store_address) - .expect("Failed to convert IPA => PA"); + let virtual_address_to_store = + get_virtual_address_to_access_ipa(intermediate_physical_store_address, true)?; pr_debug!( "[{:#X}](PA) <= R{}({})", - physical_store_address, + intermediate_physical_store_address, target_register, match size { 0b00 => " 8Bit", @@ -208,7 +206,8 @@ fn store_register_into_address( } else { *get_register_reference_mut(s_r, target_register) }; - let hook_result = memory_store_hook_handler(physical_store_address, s_r, size, reg_data)?; + let hook_result = + memory_store_hook_handler(intermediate_physical_store_address, s_r, size, reg_data)?; let data = match hook_result { StoreHookResult::PassThrough => reg_data, StoreHookResult::AlternativeData(d) => d, @@ -219,14 +218,27 @@ fn store_register_into_address( }; pr_debug!("Data: {:#X}", data); + _write_memory(virtual_address_to_store, size, data); + return Ok(()); +} + +pub fn _write_memory(store_address: usize, access_size: u8, data: u64) { use core::ptr::write_volatile; - match size { - 0b00 => unsafe { write_volatile(physical_store_address as *mut u8, data as u8) }, - 0b01 => unsafe { write_volatile(physical_store_address as *mut u16, data as u16) }, - 0b10 => unsafe { write_volatile(physical_store_address as *mut u32, data as u32) }, - 0b11 => unsafe { write_volatile(physical_store_address as *mut u64, data) }, + match access_size { + 0b00 => unsafe { write_volatile(store_address as *mut u8, data as u8) }, + 0b01 => unsafe { write_volatile(store_address as *mut u16, data as u16) }, + 0b10 => unsafe { write_volatile(store_address as *mut u32, data as u32) }, + 0b11 => unsafe { write_volatile(store_address as *mut u64, data) }, _ => unreachable!(), }; - invalidate_data_cache(physical_store_address); - return Ok(()); +} + +#[allow(dead_code)] +pub fn write_memory(intermediate_physical_store_address: usize, access_size: u8, data: u64) { + _write_memory( + get_virtual_address_to_access_ipa(intermediate_physical_store_address, true) + .expect("Failed to convert Address"), + access_size, + data, + ) } diff --git a/src/hypervisor_kernel/src/fast_restore.rs b/src/hypervisor_kernel/src/fast_restore.rs index e00e1b4..740ee6e 100644 --- a/src/hypervisor_kernel/src/fast_restore.rs +++ b/src/hypervisor_kernel/src/fast_restore.rs @@ -5,27 +5,25 @@ // This software is released under the MIT License. 
// http://opensource.org/licenses/mit-license.php -use crate::paging::{ - add_memory_access_trap, map_address, remake_page_table, remove_memory_access_trap, -}; -use crate::psci::{call_psci_function, PsciFunctionId}; -use crate::{allocate_memory, StoredRegisters, BSP_MPIDR, INSTRUCTION_SIZE}; - -use common::acpi::get_acpi_table; -use common::acpi::madt::MADT; -use common::cpu::{ - convert_virtual_address_to_intermediate_physical_address_el1_read, - convert_virtual_address_to_intermediate_physical_address_el1_write, +use crate::{ + allocate_memory, free_memory, + gic::restore_gic, + multi_core::{power_off_cpu, NUMBER_OF_RUNNING_AP, STACK_TO_FREE_LATER}, + paging::{ + add_memory_access_trap, map_address, remake_stage2_page_table, remove_memory_access_trap, + }, + psci::PsciReturnCode, + smmu::restore_smmu_status, + StoredRegisters, BSP_MPIDR, }; + use common::{ cpu, paging, MemorySaveListEntry, MEMORY_SAVE_ADDRESS_ONDEMAND_FLAG, PAGE_MASK, PAGE_SHIFT, - PAGE_SIZE, + PAGE_SIZE, STACK_PAGES, }; -use core::arch::asm; -use core::intrinsics::unlikely; use core::mem::MaybeUninit; -use core::ptr::{copy_nonoverlapping, write_volatile}; +use core::ptr::copy_nonoverlapping; use core::sync::atomic::{AtomicBool, Ordering}; static IS_RESTORE_NEEDED: AtomicBool = AtomicBool::new(false); @@ -57,7 +55,7 @@ pub fn add_memory_save_list(list: *mut [MemorySaveListEntry]) { pub fn create_memory_trap_for_save_memory() { unsafe { ORIGINAL_VTTBR_EL2 = cpu::get_vttbr_el2() }; - let page_table = remake_page_table().expect("Failed to remake page table."); + let page_table = remake_stage2_page_table().expect("Failed to remake page table."); cpu::set_vttbr_el2(page_table as u64); let list = unsafe { MEMORY_SAVE_LIST.assume_init_read() }; for e in list { @@ -77,12 +75,11 @@ pub fn create_memory_trap_for_save_memory() { } } +#[inline(always)] pub fn check_memory_access_for_memory_save_list(ec: u8, far_el2: u64) -> bool { - if unlikely(unsafe { ORIGINAL_VTTBR_EL2 != 0 }) { - if ec == crate::EC_DATA_ABORT { - add_memory_area_to_memory_save_list(far_el2); - return true; - } + if unsafe { ORIGINAL_VTTBR_EL2 != 0 } && ec == crate::EC_DATA_ABORT { + add_memory_area_to_memory_save_list(far_el2); + return true; } return false; } @@ -118,12 +115,15 @@ fn compress_memory_save_list(list: &mut [MemorySaveListEntry]) -> Option return None; } +#[inline(never)] fn add_memory_area_to_memory_save_list(far_el2: u64) { let fault_address = - convert_virtual_address_to_intermediate_physical_address_el1_read(far_el2 as usize) + cpu::convert_virtual_address_to_intermediate_physical_address_el1_read(far_el2 as usize) .unwrap_or_else(|_| { - convert_virtual_address_to_intermediate_physical_address_el1_write(far_el2 as usize) - .unwrap_or_else(|_| panic!("Failed to convert FAR_EL2({:#X})", far_el2)) + cpu::convert_virtual_address_to_intermediate_physical_address_el1_write( + far_el2 as usize, + ) + .unwrap_or_else(|_| panic!("Failed to convert FAR_EL2({:#X})", far_el2)) }) & PAGE_MASK; @@ -159,15 +159,13 @@ fn add_memory_area_to_memory_save_list(far_el2: u64) { } if e.saved_address == MEMORY_SAVE_ADDRESS_ONDEMAND_FLAG { available_entry = Some(e); - } else { - if e.memory_start == fault_address + PAGE_SIZE { - e.memory_start = fault_address; - e.num_of_pages += 1; - break; - } else if e.memory_start + ((e.num_of_pages as usize) << PAGE_SHIFT) == fault_address { - e.num_of_pages += 1; - break; - } + } else if e.memory_start == fault_address + PAGE_SIZE { + e.memory_start = fault_address; + e.num_of_pages += 1; + break; + } else if e.memory_start + 
((e.num_of_pages as usize) << PAGE_SHIFT) == fault_address { + e.num_of_pages += 1; + break; } if i + 1 == list_length { let list = unsafe { MEMORY_SAVE_LIST.assume_init_read() }; @@ -193,7 +191,7 @@ fn save_memory(list: &mut [MemorySaveListEntry]) { } if e.saved_address != usize::MAX { let allocated_memory = - allocate_memory(e.num_of_pages as usize).expect("Failed to allocate memory"); + allocate_memory(e.num_of_pages as usize, None).expect("Failed to allocate memory"); unsafe { copy_nonoverlapping( e.memory_start as *const u8, @@ -269,11 +267,9 @@ pub fn exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { return; } assert_ne!(elr, 0); - let hvc_address = elr as usize - INSTRUCTION_SIZE; - unsafe { - *(hvc_address as *mut u32) = ORIGINAL_INSTRUCTION; - asm!("msr elr_el2, {:x}", in(reg) hvc_address); - } + let hvc_address = elr as usize - cpu::AA64_INSTRUCTION_SIZE; + unsafe { *(hvc_address as *mut u32) = ORIGINAL_INSTRUCTION }; + cpu::set_elr_el2(hvc_address as u64); save_original_instruction_and_insert_hvc(regs.x30 as usize, HVC_AFTER_EXIT_BOOT_SERVICE_TRAP); } @@ -282,12 +278,12 @@ pub fn after_exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { return; } assert_ne!(elr, 0); - let hvc_address = elr as usize - INSTRUCTION_SIZE; + let hvc_address = elr as usize - cpu::AA64_INSTRUCTION_SIZE; unsafe { *(hvc_address as *mut u32) = ORIGINAL_INSTRUCTION; - asm!("msr elr_el2, {:x}", in(reg) hvc_address); ORIGINAL_INSTRUCTION = 0; } + cpu::set_elr_el2(hvc_address as u64); cpu::flush_tlb_el1(); cpu::clear_instruction_cache_all(); pr_debug!("ExitBootServiceStatus: {:#X}", regs.x0); @@ -314,14 +310,12 @@ pub fn after_exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { cpu::set_vttbr_el2(unsafe { ORIGINAL_VTTBR_EL2 }); /* TODO: free old page table */ unsafe { ORIGINAL_VTTBR_EL2 = 0 }; pr_debug!("Remove page table for memory save"); - return; } -/// 再起動を検知した場合にこの関数を呼ぶ -pub fn enter_restore_process() -> ! { - pr_debug!("Fast Restore is requested."); - cpu::local_irq_fiq_save(); - IS_RESTORE_NEEDED.store(true, Ordering::SeqCst); +/// If you disable all entries of Stage2 Page Table, +/// don't call [`super::paging::add_memory_access_trap`] and [`super::paging::remove_memory_access_trap`] +/// until you re-enable the entries. +fn modify_all_enable_bit_of_stage2_top_level_entries(is_enabled: bool) { let stage_2_page_table = paging::TTBR::new(cpu::get_vttbr_el2()).get_base_address(); let vtcr_el2 = cpu::get_vtcr_el2(); let vtcr_el2_sl0 = ((vtcr_el2 & cpu::VTCR_EL2_SL0) >> cpu::VTCR_EL2_SL0_BITS_OFFSET) as u8; @@ -335,14 +329,36 @@ }; let num_of_pages = paging::calculate_number_of_concatenated_page_tables(vtcr_el2_t0sz, initial_look_up_level); - for e in unsafe { + + let table = unsafe { core::slice::from_raw_parts_mut( stage_2_page_table as *mut u64, (paging::PAGE_TABLE_SIZE / core::mem::size_of::<u64>()) * num_of_pages as usize, ) - } { - *e &= !1; + }; + if is_enabled { + for e in table { + *e |= 1; + } + } else { + for e in table { + *e &= !1; + } } +} + +/// This function will be called when the guest OS requests power off or reboot +pub fn enter_restore_process() -> ! 
{ + pr_debug!("Fast Restore is requested."); + cpu::local_irq_fiq_save(); + IS_RESTORE_NEEDED.store(true, Ordering::SeqCst); + + modify_all_enable_bit_of_stage2_top_level_entries(false); + /* + After this point, we must not modify stage2 page table + including add_memory_access_trap/remove_memory_access_trap + */ + cpu::flush_tlb_el1(); cpu::clear_instruction_cache_all(); /* TODO: check if register access is enabled. */ @@ -356,27 +372,51 @@ #[inline(always)] pub fn perform_restore_if_needed() { - if unlikely(IS_RESTORE_NEEDED.load(Ordering::Relaxed)) { + if IS_RESTORE_NEEDED.load(Ordering::Relaxed) { restore_main(); } } +#[inline(never)] fn restore_main() -> ! { + cpu::local_irq_fiq_save(); if unsafe { BSP_MPIDR } != cpu::get_mpidr_el1() { pr_debug!("This CPU(MPIDR: {:#X}) is not BSP, currently perform CPU_OFF(TODO: use AP to copy memory)", cpu::get_mpidr_el1()); + let result = power_off_cpu(); panic!( - "Failed to call CPU_OFF: {}", - call_psci_function(PsciFunctionId::CpuOff, 0, 0, 0) as i32 + "Failed to call CPU_OFF: {:#X?}", + PsciReturnCode::try_from(result) ); } - cpu::local_irq_fiq_save(); println!("BSP entered the restore process."); + println!("Wait until all APs are powered off..."); + cpu::dsb(); + cpu::isb(); + while NUMBER_OF_RUNNING_AP.load(Ordering::Relaxed) != 0 { + core::hint::spin_loop(); + } + println!("All APs are powered off."); + + modify_all_enable_bit_of_stage2_top_level_entries(true); + /* Now, we can call add_memory_access_trap/remove_memory_access_trap */ + + /* Free last one AP's stack if needed */ + let old_stack = STACK_TO_FREE_LATER.load(Ordering::Relaxed); + if old_stack != 0 { + if let Err(err) = free_memory(old_stack, STACK_PAGES) { + println!("Failed to free stack: {:?}", err); + } + STACK_TO_FREE_LATER.store(0, Ordering::Relaxed); + } /* Restore GIC */ if let Some(acpi_rsdp) = unsafe { crate::ACPI_RSDP } { restore_gic(acpi_rsdp); } + #[cfg(feature = "smmu")] + restore_smmu_status(); + /* Restore saved registers */ let saved_registers = unsafe { SAVED_SYSTEM_REGISTERS.assume_init_read() }; cpu::set_cpacr_el1(saved_registers.cpacr_el1); @@ -394,33 +434,12 @@ pr_debug!("Restore the memory"); restore_memory(unsafe { MEMORY_SAVE_LIST.assume_init_read() }); - let stage_2_page_table = paging::TTBR::new(cpu::get_vttbr_el2()).get_base_address(); - let vtcr_el2 = cpu::get_vtcr_el2(); - let vtcr_el2_sl0 = ((vtcr_el2 & cpu::VTCR_EL2_SL0) >> cpu::VTCR_EL2_SL0_BITS_OFFSET) as u8; - let vtcr_el2_t0sz = ((vtcr_el2 & cpu::VTCR_EL2_T0SZ) >> cpu::VTCR_EL2_T0SZ_BITS_OFFSET) as u8; - let initial_look_up_level: i8 = match vtcr_el2_sl0 { - 0b00 => 2, - 0b01 => 1, - 0b10 => 0, - 0b11 => 3, - _ => unreachable!(), - }; - let num_of_pages = - paging::calculate_number_of_concatenated_page_tables(vtcr_el2_t0sz, initial_look_up_level); - for e in unsafe { - core::slice::from_raw_parts_mut( - stage_2_page_table as *mut u64, - (paging::PAGE_TABLE_SIZE / core::mem::size_of::<u64>()) * num_of_pages as usize, - ) - } { - *e |= 1; - } cpu::flush_tlb_el1(); cpu::clear_instruction_cache_all(); pr_debug!("ERET"); IS_RESTORE_NEEDED.store(false, Ordering::SeqCst); unsafe { - asm!(" + core::arch::asm!(" ldp x30, xzr, [x0, #( 15 * 16)] ldp x28, x29, [x0, #( 14 * 16)] ldp x26, x27, [x0, #( 13 * 16)] @@ -440,32 +459,3 @@ fn restore_main() -> ! 
{ eret", in("x0") SAVED_REGISTERS.as_ptr() as usize, options(noreturn)) } } - -const GICR_WAKER: usize = 0x0014; -const GCIR_WAKER_PROCESSOR_SLEEP: u32 = 1 << 1; - -const GICD_CTLR: usize = 0x00; - -fn restore_gic(acpi_address: usize) { - // TODO: Discovery Base Address - if let Ok(table) = get_acpi_table(acpi_address, b"APIC") { - let table = unsafe { &*(table as *const MADT) }; - let list = table.get_gic_list(); - for e in list { - /* Set sleep */ - let redistributor_base = e.gicr_base_address as usize; - if redistributor_base == 0 { - todo!() - } - let waker = redistributor_base + GICR_WAKER; - unsafe { write_volatile(waker as *mut u32, GCIR_WAKER_PROCESSOR_SLEEP) }; - } - if let Some(distributor) = table.get_gic_distributor_address() { - if distributor != 0 { - unsafe { write_volatile((distributor + GICD_CTLR) as *mut u32, 0) }; - } - } else { - println!("DistributorBase is zero"); - } - } -} diff --git a/src/hypervisor_kernel/src/gic.rs b/src/hypervisor_kernel/src/gic.rs new file mode 100644 index 0000000..12aa072 --- /dev/null +++ b/src/hypervisor_kernel/src/gic.rs @@ -0,0 +1,174 @@ +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) +// All rights reserved. +// +// This software is released under the MIT License. +// http://opensource.org/licenses/mit-license.php + +use crate::memory_hook::{ + add_memory_load_hook_handler, add_memory_store_hook_handler, remove_memory_load_hook_handler, + remove_memory_store_hook_handler, LoadAccessHandlerEntry, LoadHookResult, + StoreAccessHandlerEntry, StoreHookResult, +}; +use crate::paging::{add_memory_access_trap, map_address, remove_memory_access_trap}; +use crate::StoredRegisters; + +use common::acpi::{get_acpi_table, madt::MADT}; +use common::paging::{page_align_up, stage2_page_align_up}; +use common::PAGE_SIZE; + +use core::ptr::{read_volatile, write_volatile}; + +const GICR_MAP_SIZE: usize = 0x1000; + +const GICR_CTLR: usize = 0x0000; +const GICR_CTLR_RWP: u32 = 1 << 3; +const GICR_CTLR_ENABLE_LPIS: u32 = 1; + +const GICR_WAKER: usize = 0x0014; +const GICR_WAKER_PROCESSOR_SLEEP: u32 = 1 << 1; + +const GICR_PROPBASER: usize = 0x0070; +const GICR_PROPBASER_PTZ: u64 = 1 << 62; + +const GICD_CTLR: usize = 0x00; + +const GITS_CTLR: usize = 0x00; +const GITS_CTLR_ENABLED: u32 = 0x01; +const GITS_CTLR_QUIESCENT: u32 = 1 << 31; + +pub fn restore_gic(acpi_address: usize) { + // TODO: Discovery Base Address + if let Ok(table) = get_acpi_table(acpi_address, b"APIC") { + let table = unsafe { &*(table as *const MADT) }; + + for e in table.get_gic_its_list() { + map_address(e, e, PAGE_SIZE, true, true, false, true).expect("Failed to map ITS"); + unsafe { write_volatile((e + GITS_CTLR) as *mut u32, 0) }; + while unsafe { read_volatile((e + GITS_CTLR) as *const u32) & GITS_CTLR_ENABLED } != 0 { + core::hint::spin_loop(); + } + unsafe { write_volatile((e + GITS_CTLR) as *mut u32, GITS_CTLR_QUIESCENT) }; + } + + if let Some(distributor) = table.get_gic_distributor_address() { + if distributor != 0 { + unsafe { write_volatile((distributor + GICD_CTLR) as *mut u32, 0) }; + } + } else { + println!("DistributorBase is zero"); + } + + for e in table.get_gic_list() { + let redistributor_base = e.gicr_base_address as usize; + if redistributor_base == 0 { + todo!() + } + map_address( + redistributor_base, + redistributor_base, + page_align_up(GICR_MAP_SIZE), + true, + true, + false, + true, + ) + .expect("Failed to map GIC Redistributor"); + + let ctrl = (redistributor_base + GICR_CTLR) as *mut u32; + while unsafe { 
read_volatile(ctrl) & GICR_CTLR_RWP } != 0 { + core::hint::spin_loop(); + } + unsafe { write_volatile(ctrl, 0) }; + while unsafe { read_volatile(ctrl) & GICR_CTLR_RWP } != 0 { + core::hint::spin_loop(); + } + if (unsafe { read_volatile(ctrl) } & GICR_CTLR_ENABLE_LPIS) != 0 { + pr_debug!( + "GICR_CTLR::EnableLPIs became RES1(this behavior is IMPLEMENTATION DEFINED).\ + Therefore, add trap to mask this bit until EL1 writes this bit 1." + ); + add_memory_load_hook_handler(LoadAccessHandlerEntry::new( + redistributor_base, + stage2_page_align_up(GICR_MAP_SIZE), + gic_redistributor_fast_restore_load_handler, + )) + .expect("Failed to add load handler"); + add_memory_store_hook_handler(StoreAccessHandlerEntry::new( + redistributor_base, + stage2_page_align_up(GICR_MAP_SIZE), + gic_redistributor_fast_restore_store_handler, + )) + .expect("Failed to add store handler"); + add_memory_access_trap( + redistributor_base, + stage2_page_align_up(GICR_MAP_SIZE), + false, + false, + ) + .expect("Failed to trap GIC Register"); + } + + unsafe { + write_volatile( + (redistributor_base + GICR_WAKER) as *mut u32, + GICR_WAKER_PROCESSOR_SLEEP, + ) + }; + } + } +} + +fn gic_redistributor_fast_restore_load_handler( + accessing_address: usize, + _stored_registers: &mut StoredRegisters, + _access_size: u8, + _is_64bit_register: bool, + _is_sign_extend_required: bool, +) -> Result<LoadHookResult, ()> { + let offset = accessing_address & (GICR_MAP_SIZE - 1); + match offset { + GICR_CTLR => Ok(LoadHookResult::Data( + (unsafe { read_volatile(accessing_address as *const u32) } & !GICR_CTLR_ENABLE_LPIS) + as u64, + )), + _ => Ok(LoadHookResult::PassThrough), + } +} + +fn gic_redistributor_fast_restore_store_handler( + accessing_address: usize, + _stored_registers: &mut StoredRegisters, + _access_size: u8, + data: u64, +) -> Result<StoreHookResult, ()> { + let base = accessing_address & !(GICR_MAP_SIZE - 1); + let offset = accessing_address & (GICR_MAP_SIZE - 1); + match offset { + GICR_CTLR => { + if (data & (GICR_CTLR_ENABLE_LPIS as u64)) != 0 { + pr_debug!("Remove the trap of GIC Redistributor"); + remove_memory_access_trap(base, stage2_page_align_up(GICR_MAP_SIZE)) + .expect("Failed to remove the trap GIC Register"); + remove_memory_load_hook_handler(LoadAccessHandlerEntry::new( + base, + stage2_page_align_up(GICR_MAP_SIZE), + gic_redistributor_fast_restore_load_handler, + )) + .expect("Failed to remove load handler"); + remove_memory_store_hook_handler(StoreAccessHandlerEntry::new( + base, + stage2_page_align_up(GICR_MAP_SIZE), + gic_redistributor_fast_restore_store_handler, + )) + .expect("Failed to remove store handler"); + } + Ok(StoreHookResult::PassThrough) + } + GICR_PROPBASER => { + let original_data = unsafe { read_volatile(accessing_address as *const u64) }; + assert_eq!(original_data & !GICR_PROPBASER_PTZ, data); + Ok(StoreHookResult::Cancel) + } + _ => Ok(StoreHookResult::PassThrough), + } +} diff --git a/src/hypervisor_kernel/src/main.rs b/src/hypervisor_kernel/src/main.rs index f6dc59f..f726a11 100644 --- a/src/hypervisor_kernel/src/main.rs +++ b/src/hypervisor_kernel/src/main.rs @@ -8,11 +8,7 @@ #![no_std] #![no_main] #![feature(asm_const)] -#![feature(const_maybe_uninit_uninit_array)] #![feature(const_mut_refs)] -#![feature(core_intrinsics)] -#![feature(format_args_nl)] -#![feature(maybe_uninit_uninit_array)] #![feature(naked_functions)] #![feature(panic_info_message)] @@ -22,6 +18,7 @@ mod acpi_protect; mod drivers; mod emulation; mod fast_restore; +mod gic; mod memory_hook; mod multi_core; mod paging; @@ -30,21 +27,25 @@ mod pci; mod psci; 
mod smmu; -use common::cpu::{get_mpidr_el1, secure_monitor_call}; -use common::{acpi, bitmask}; -use common::{SystemInformation, ALLOC_SIZE, PAGE_SIZE}; - -use core::arch::{asm, global_asm}; +use common::cpu::{ + advance_elr_el2, get_elr_el2, get_esr_el2, get_far_el2, get_hpfar_el2, get_mpidr_el1, + get_spsr_el2, secure_monitor_call, +}; +use common::spin_flag::SpinLockFlag; +use common::{ + acpi, bitmask, MemoryAllocationError, MemoryAllocator, SystemInformation, COMPILER_INFO, + HYPERVISOR_HASH_INFO, HYPERVISOR_NAME, PAGE_SHIFT, +}; + +use core::arch::global_asm; use core::mem::MaybeUninit; const EC_HVC: u8 = 0b010110; const EC_SMC_AA64: u8 = 0b010111; const EC_DATA_ABORT: u8 = 0b100100; -const INSTRUCTION_SIZE: usize = 4; - -static mut MEMORY_POOL: ([MaybeUninit<usize>; ALLOC_SIZE / PAGE_SIZE], usize) = - (MaybeUninit::uninit_array(), 0); +static mut MEMORY_ALLOCATOR: (SpinLockFlag, MaybeUninit<MemoryAllocator>) = + (SpinLockFlag::new(), MaybeUninit::uninit()); static mut ACPI_RSDP: Option<usize> = None; static mut BSP_MPIDR: u64 = 0; @@ -97,14 +98,17 @@ fn hypervisor_main(system_information: &mut SystemInformation) { if let Some(s_info) = &system_information.serial_port { unsafe { serial_port::init_default_serial_port(s_info.clone()) }; } + + show_kernel_info(); + unsafe { - MEMORY_POOL = system_information.memory_pool.clone(); + MEMORY_ALLOCATOR.1.assume_init_mut().init( + system_information.available_memory_info.0, + system_information.available_memory_info.1 << PAGE_SHIFT, + ); ACPI_RSDP = system_information.acpi_rsdp_address; } - println!("Hello,world from Hypervisor Kernel!!"); - show_features_status(); - if let Some(ecam_info) = &system_information.ecam_info { pci::init_pci(ecam_info.address, ecam_info.start_bus, ecam_info.end_bus); } @@ -131,14 +135,14 @@ fn hypervisor_main(system_information: &mut SystemInformation) { fast_restore::create_memory_trap_for_save_memory(); } - unsafe { - BSP_MPIDR = get_mpidr_el1(); - asm!("adr {:x}, vector_table_el2", out(reg) system_information.vbar_el2 ); + unsafe { BSP_MPIDR = get_mpidr_el1() }; + extern "C" { + fn vector_table_el2(); } - return; + system_information.vbar_el2 = vector_table_el2 as *const fn() as usize as u64; } -fn show_features_status() { +fn show_kernel_info() { macro_rules! 
print_is_feature_enabled { ($feature:expr) => { println!( @@ -153,6 +157,23 @@ }; } + if let Some(hash_info) = HYPERVISOR_HASH_INFO { + println!( + "{} Kernel Version {}({hash_info})", + HYPERVISOR_NAME, + env!("CARGO_PKG_VERSION") + ); + } else { + println!( + "{} Kernel Version {}", + HYPERVISOR_NAME, + env!("CARGO_PKG_VERSION") + ); + } + if let Some(compiler_info) = COMPILER_INFO { + println!("Compiler Information: {compiler_info}"); + } + print_is_feature_enabled!("smmu"); print_is_feature_enabled!("i210"); print_is_feature_enabled!("mt27800"); @@ -160,33 +181,63 @@ print_is_feature_enabled!("acpi_table_protection"); print_is_feature_enabled!("contiguous_bit"); print_is_feature_enabled!("a64fx"); + print_is_feature_enabled!("advanced_memory_manager"); } -pub fn allocate_memory(pages: usize) -> Result<usize, ()> { - if unsafe { MEMORY_POOL.1 < pages } { - return Err(()); +/// Allocate memory from memory pool +/// +/// # Arguments +/// * `pages` - The number of pages to allocate, the allocation size is `pages` << [`PAGE_SHIFT`] +/// * `align` - The alignment of the returned address, if `None`, [`PAGE_SHIFT`] will be used +/// +/// # Result +/// If the allocation is succeeded, Ok(start_address), otherwise Err(()) +pub fn allocate_memory(pages: usize, align: Option<usize>) -> Result<usize, MemoryAllocationError> { + unsafe { + MEMORY_ALLOCATOR.0.lock(); + let result = MEMORY_ALLOCATOR + .1 + .assume_init_mut() + .allocate(pages << PAGE_SHIFT, align.unwrap_or(PAGE_SHIFT)); + MEMORY_ALLOCATOR.0.unlock(); + return result; + } +} + +/// Free memory to memory pool +/// +/// # Arguments +/// * address: The start address to return to memory pool, it must be allocated by [`allocate_memory`] +/// * pages: The number of allocated pages +/// +/// # Result +/// If succeeded, Ok(()), otherwise Err(()) +pub fn free_memory(address: usize, pages: usize) -> Result<(), MemoryAllocationError> { + unsafe { + MEMORY_ALLOCATOR.0.lock(); + let result = MEMORY_ALLOCATOR + .1 + .assume_init_mut() + .free(address, pages << PAGE_SHIFT); + MEMORY_ALLOCATOR.0.unlock(); + return result; } - unsafe { MEMORY_POOL.1 -= pages }; - return Ok(unsafe { MEMORY_POOL.0[MEMORY_POOL.1].assume_init() }); } #[no_mangle] extern "C" fn synchronous_exception_handler(regs: &mut StoredRegisters) { - let esr_el2: u64; - let elr_el2: u64; - let far_el2: u64; - let hpfar_el2: u64; - unsafe { asm!("mrs {:x}, esr_el2", out(reg) esr_el2) }; - unsafe { asm!("mrs {:x}, elr_el2", out(reg) elr_el2) }; - unsafe { asm!("mrs {:x}, far_el2", out(reg) far_el2) }; - unsafe { asm!("mrs {:x}, hpfar_el2", out(reg) hpfar_el2) }; + let esr_el2 = get_esr_el2(); + let elr_el2 = get_elr_el2(); + let far_el2 = get_far_el2(); + let hpfar_el2 = get_hpfar_el2(); + let spsr_el2 = get_spsr_el2(); pr_debug!("Synchronous Exception!!"); pr_debug!("ESR_EL2: {:#X}", esr_el2); pr_debug!("ELR_EL2: {:#X}", elr_el2); pr_debug!("FAR_EL2: {:#X}", far_el2); pr_debug!("HPFAR_EL2: {:#X}", hpfar_el2); - pr_debug!("MPIDR_EL1: {:#X}", mpidr_el1); + pr_debug!("MPIDR_EL1: {:#X}", get_mpidr_el1()); pr_debug!("Registers: {:#X?}", regs); let ec = ((esr_el2 >> 26) & bitmask!(5, 0)) as u8; @@ -216,11 +267,7 @@ }, EC_SMC_AA64 => { /* Adjust return address */ - unsafe { - asm!("mrs {t}, ELR_EL2 - add {t}, {t}, {size} - msr ELR_EL2, {t}", t = out(reg) _, size = const INSTRUCTION_SIZE ) - }; + advance_elr_el2(); let smc_number = esr_el2 & bitmask!(15, 0); pr_debug!("SecureMonitor Call: {:#X}", smc_number); 
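/* ELR_EL2 has already been advanced past the trapped SMC by advance_elr_el2() above, so ERET will resume the guest at the following instruction once the call has been handled. */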
pr_debug!("Registers: {:#X?}", regs); @@ -257,7 +304,7 @@ extern "C" fn synchronous_exception_handler(regs: &mut StoredRegisters) { EC_DATA_ABORT => { pr_debug!("Data Abort"); if let Err(e) = - emulation::data_abort_handler(regs, esr_el2, elr_el2, far_el2, hpfar_el2) + emulation::data_abort_handler(regs, esr_el2, elr_el2, far_el2, hpfar_el2, spsr_el2) { handler_panic!(regs, "Failed to emulate the instruction: {:?}", e); } @@ -270,30 +317,20 @@ extern "C" fn synchronous_exception_handler(regs: &mut StoredRegisters) { } #[no_mangle] -extern "C" fn s_error_exception_handler() { - println!("S Error Exception!!"); - loop { - unsafe { - asm!("wfi"); - } - } +extern "C" fn s_error_exception_handler(regs: &mut StoredRegisters) { + handler_panic!(regs, "S Error Exception!!"); } #[track_caller] fn interrupt_handler_panic(s_r: &StoredRegisters, f: core::fmt::Arguments) -> ! { - let esr_el2: u64; - let elr_el2: u64; - let far_el2: u64; - let spsr_el2: u64; - let hpfar_el2: u64; - let mpidr_el1: u64; - unsafe { asm!("mrs {:x}, esr_el2", out(reg) esr_el2) }; - unsafe { asm!("mrs {:x}, elr_el2", out(reg) elr_el2) }; - unsafe { asm!("mrs {:x}, far_el2", out(reg) far_el2) }; - unsafe { asm!("mrs {:x}, spsr_el2", out(reg) spsr_el2) }; - unsafe { asm!("mrs {:x}, hpfar_el2", out(reg) hpfar_el2) }; - unsafe { asm!("mrs {:x}, mpidr_el1", out(reg) mpidr_el1) }; - if let Some(s) = unsafe { crate::serial_port::DEFAULT_SERIAL_PORT.as_ref() } { + let esr_el2 = get_esr_el2(); + let elr_el2 = get_elr_el2(); + let far_el2 = get_far_el2(); + let spsr_el2 = get_spsr_el2(); + let hpfar_el2 = get_hpfar_el2(); + let mpidr_el1 = get_mpidr_el1(); + + if let Some(s) = unsafe { serial_port::DEFAULT_SERIAL_PORT.as_ref() } { unsafe { s.force_release_write_lock() }; } println!("ESR_EL2: {:#X}", esr_el2); diff --git a/src/hypervisor_kernel/src/memory_hook.rs b/src/hypervisor_kernel/src/memory_hook.rs index e8e1260..57c2e23 100644 --- a/src/hypervisor_kernel/src/memory_hook.rs +++ b/src/hypervisor_kernel/src/memory_hook.rs @@ -1,4 +1,5 @@ // Copyright (c) 2022 RIKEN +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) // All rights reserved. // // This software is released under the MIT License. @@ -10,13 +11,11 @@ use crate::StoredRegisters; -#[allow(dead_code)] pub enum LoadHookResult { PassThrough, Data(u64), } -#[allow(dead_code)] pub enum StoreHookResult { PassThrough, AlternativeData(u64), diff --git a/src/hypervisor_kernel/src/multi_core.rs b/src/hypervisor_kernel/src/multi_core.rs index afd809b..9e538d1 100644 --- a/src/hypervisor_kernel/src/multi_core.rs +++ b/src/hypervisor_kernel/src/multi_core.rs @@ -9,15 +9,15 @@ //! MultiCore Handling Functions //! 
-use crate::{allocate_memory, handler_panic, StoredRegisters}; - use crate::psci::{call_psci_function, PsciFunctionId, PsciReturnCode}; +use crate::{allocate_memory, free_memory, handler_panic, StoredRegisters}; + +use common::{cpu, PAGE_SHIFT, STACK_PAGES}; -use common::cpu::{convert_virtual_address_to_physical_address_el2_read, MAX_ZCR_EL2_LEN}; -use common::STACK_PAGES; +use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; -use core::arch::asm; -use core::sync::atomic::{AtomicU64, Ordering}; +pub static NUMBER_OF_RUNNING_AP: AtomicU64 = AtomicU64::new(0); +pub static STACK_TO_FREE_LATER: AtomicUsize = AtomicUsize::new(0); #[repr(C, align(16))] #[derive(Debug)] @@ -56,30 +56,19 @@ static mut REGISTER_BUFFER: HypervisorRegisters = HypervisorRegisters { }; pub fn setup_new_cpu(regs: &mut StoredRegisters) { - let stack_address: u64 = - allocate_memory(STACK_PAGES).expect("Failed to alloc stack for new CPU.") as u64; - let cnthctl_el2: u64; - let cptr_el2: u64; - let hcr_el2: u64; - let vttbr_el2: u64; - let ttbr0_el2: u64; - let mair_el2: u64; - let tcr_el2: u64; - let vtcr_el2: u64; - let sctlr_el2: u64; - let vbar_el2: u64; - unsafe { - asm!("mrs {:x}, cnthctl_el2", out(reg) cnthctl_el2); - asm!("mrs {:x}, cptr_el2", out(reg) cptr_el2); - asm!("mrs {:x}, hcr_el2", out(reg) hcr_el2); - asm!("mrs {:x}, ttbr0_el2", out(reg) ttbr0_el2); - asm!("mrs {:x}, vttbr_el2", out(reg) vttbr_el2); - asm!("mrs {:x}, mair_el2", out(reg)mair_el2); - asm!("mrs {:x}, tcr_el2", out(reg) tcr_el2); - asm!("mrs {:x}, vtcr_el2", out(reg) vtcr_el2); - asm!("mrs {:x}, sctlr_el2", out(reg) sctlr_el2); - asm!("mrs {:x}, vbar_el2", out(reg) vbar_el2); - } + let stack_address = (allocate_memory(STACK_PAGES, Some(STACK_PAGES)) + .expect("Failed to allocate stack") + + (STACK_PAGES << PAGE_SHIFT)) as u64; + let cnthctl_el2 = cpu::get_cnthctl_el2(); + let cptr_el2 = cpu::get_cptr_el2(); + let hcr_el2 = cpu::get_hcr_el2(); + let vttbr_el2 = cpu::get_vttbr_el2(); + let ttbr0_el2 = cpu::get_ttbr0_el2(); + let mair_el2 = cpu::get_mair_el2(); + let tcr_el2 = cpu::get_tcr_el2(); + let vtcr_el2 = cpu::get_vtcr_el2(); + let sctlr_el2 = cpu::get_sctlr_el2(); + let vbar_el2 = cpu::get_vbar_el2(); /* Aquire REGISTER_BUFFER's lock */ loop { @@ -116,15 +105,16 @@ pub fn setup_new_cpu(regs: &mut StoredRegisters) { REGISTER_BUFFER.el1_context_id = regs.x3; } - let hypervisor_registers_real_address = convert_virtual_address_to_physical_address_el2_read( - unsafe { ®ISTER_BUFFER } as *const _ as usize, - ) - .expect("Failed to convert virtual address to real address"); + let hypervisor_registers_real_address = + cpu::convert_virtual_address_to_physical_address_el2_read( + unsafe { ®ISTER_BUFFER } as *const _ as usize + ) + .expect("Failed to convert virtual address to real address"); let cpu_boot_address_real_address = - convert_virtual_address_to_physical_address_el2_read(cpu_boot as *const fn() as usize) + cpu::convert_virtual_address_to_physical_address_el2_read(cpu_boot as *const fn() as usize) .expect("Failed to convert virtual address to real address"); - pr_debug!("{:#X?}", hypervisor_registers); + pr_debug!("{:#X?}", unsafe { ®ISTER_BUFFER }); regs.x0 = call_psci_function( PsciFunctionId::CpuOn, @@ -135,19 +125,58 @@ pub fn setup_new_cpu(regs: &mut StoredRegisters) { if regs.x0 as i32 != PsciReturnCode::Success as i32 { handler_panic!( regs, - "Failed to on the cpu (MPIDR: {:#X}): {:?}", + "Failed to power on the cpu (MPIDR: {:#X}): {:?}", regs.x1, PsciReturnCode::try_from(regs.x0 as i32) ); } + 
NUMBER_OF_RUNNING_AP.fetch_add(1, Ordering::SeqCst); pr_debug!("The initialization completed."); } +/// # ATTENTION +/// do not power off BSP(BSP's stack may not be aligned with [`STACK_PAGES`]) +pub fn power_off_cpu() -> i32 { + let stack_address = (cpu::get_sp() as usize) & !((STACK_PAGES << PAGE_SHIFT) - 1); + + /* + STACK_TO_FREE_LATER_FLAG.lock(); + let stack_address_to_free = STACK_TO_FREE_LATER.load(Ordering::Relaxed); + if stack_address_to_free != 0 { + if let Err(err) = free_memory(stack_address_to_free, STACK_PAGES) { + println!("Failed to free stack: {:?}", err); + } + } + STACK_TO_FREE_LATER.store(stack_address, Ordering::Relaxed); + STACK_TO_FREE_LATER_FLAG.unlock(); + */ + + loop { + let current = STACK_TO_FREE_LATER.load(Ordering::Acquire); + if let Ok(stack_address_to_free) = STACK_TO_FREE_LATER.compare_exchange( + current, + stack_address, + Ordering::Release, + Ordering::Relaxed, + ) { + if stack_address_to_free != 0 { + if let Err(err) = free_memory(stack_address_to_free, STACK_PAGES) { + println!("Failed to free stack: {:?}", err); + } + } + break; + } + } + + NUMBER_OF_RUNNING_AP.fetch_sub(1, Ordering::SeqCst); + call_psci_function(PsciFunctionId::CpuOff, 0, 0, 0) as i32 +} + /* cpu_boot must use position-relative code */ #[naked] extern "C" fn cpu_boot() { unsafe { - asm!( + core::arch::asm!( " // MIDR_EL1 & MPIDR_EL1 mrs x15, midr_el1 msr vpidr_el2, x15 @@ -207,10 +236,13 @@ msr spsr_el2, x1 msr elr_el2, x12 mov x0, x13 + dsb sy + isb str x14, [x14] isb eret - ", MAX_ZCR_EL2_LEN = const MAX_ZCR_EL2_LEN, A64FX = const cfg!(feature = "a64fx") as u64, + ", MAX_ZCR_EL2_LEN = const cpu::MAX_ZCR_EL2_LEN, + A64FX = const cfg!(feature = "a64fx") as u64, options(noreturn)) } } diff --git a/src/hypervisor_kernel/src/paging.rs b/src/hypervisor_kernel/src/paging.rs index cb90d3d..fe2a597 100644 --- a/src/hypervisor_kernel/src/paging.rs +++ b/src/hypervisor_kernel/src/paging.rs @@ -9,13 +9,13 @@ //! Paging //! -use super::allocate_memory; +use crate::{allocate_memory, free_memory}; use common::cpu::*; use common::paging::*; use common::{PAGE_SHIFT, PAGE_SIZE, STAGE_2_PAGE_SHIFT, STAGE_2_PAGE_SIZE}; -fn remake_page_table_recursive( +fn _remake_stage2_page_table( table_address: usize, physical_address: &mut usize, table_level: i8, @@ -46,7 +46,7 @@ for e in page_table { let next_table_address = allocate_page_table_for_stage_2(table_level + 1, vtcr_el2_t0sz, false, 1)?; - remake_page_table_recursive( + _remake_stage2_page_table( next_table_address, physical_address, table_level + 1, @@ -59,7 +59,7 @@ return Ok(()); } -pub fn remake_page_table() -> Result<usize, ()> { +pub fn remake_stage2_page_table() -> Result<usize, ()> { let vtcr_el2 = get_vtcr_el2(); let vtcr_el2_sl0 = ((vtcr_el2 & VTCR_EL2_SL0) >> VTCR_EL2_SL0_BITS_OFFSET) as u8; let vtcr_el2_sl2 = ((vtcr_el2 & VTCR_EL2_SL2) >> VTCR_EL2_SL2_BIT_OFFSET) as u8; @@ -81,7 +81,7 @@ number_of_tables as u8, )?; - remake_page_table_recursive( + _remake_stage2_page_table( table_address, &mut physical_address, initial_look_up_level, @@ -97,8 +97,8 @@ /// This will map memory area upto `num_of_remaining_pages`. /// This will call itself recursively, and map address until `num_of_remaining_pages` == 0 or reached the end of table. /// When all page is mapped successfully, `num_of_remaining_pages` has been 0. 
-/// # Arguments /// +/// # Arguments /// * `physical_address` - The address to map /// * `virtual_address` - The address to associate with `physical_address` /// * `num_of_remaining_pages` - The number of page entries to be mapped, this value will be changed @@ -165,15 +165,19 @@ fn map_address_recursive( *physical_address, table_level ); - if is_descriptor_table_or_level_3_descriptor(*target_descriptor) { - pr_debug!( - "PageTable:({:#X}) will be deleted.", - extract_output_address(*target_descriptor, PAGE_SHIFT) - ); - /* TODO: free page table */ + + let old_descriptor = core::mem::replace( + target_descriptor, + *physical_address as u64 + | create_attributes_for_stage_1(permission, memory_attribute, true), + ); + + if is_descriptor_table_or_level_3_descriptor(old_descriptor) { + let old_table = extract_output_address(old_descriptor, PAGE_SHIFT); + pr_debug!("PageTable:({:#X}) will be deleted.", old_table); + let _ = free_memory(old_table, 1); } - let attributes = create_attributes_for_stage_1(permission, memory_attribute, true); - *target_descriptor = *physical_address as u64 | attributes; + *physical_address += 1 << shift_level; *virtual_address += 1 << shift_level; *num_of_remaining_pages -= 512usize.pow((3 - table_level) as u32); @@ -244,6 +248,22 @@ fn map_address_recursive( return Ok(()); } +/// Map address +/// +/// This will map virtual address into physical address +/// The virtual address is for EL2. +/// +/// # Arguments +/// * `physical_address` - The address to map +/// * `virtual_address` - The address to associate with `physical_address` +/// * `size` - The map size +/// * `readable` - If true, the memory area will be readable +/// * `writable` - If true, the memory area will be writable +/// * `executable` - If true, the memory area will be executable +/// * `is_device` - If true, the cache control of the memory area will become for device memory +/// +/// # Result +/// If mapping is succeeded, returns Ok(()), otherwise returns Err(()) pub fn map_address( mut physical_address: usize, mut virtual_address: usize, @@ -399,17 +419,18 @@ fn map_address_recursive_stage2( *physical_address, table_level ); - if is_descriptor_table_or_level_3_descriptor(*target_descriptor) { - pr_debug!( - "PageTable:({:#X}) will be deleted.", - extract_output_address(*target_descriptor, STAGE_2_PAGE_SHIFT) - ); - /* TODO: free page table */ - } - let attributes = - create_attributes_for_stage_2(permission, is_dummy_page, is_unmap, true); - *target_descriptor = *physical_address as u64 | attributes; + let old_descriptor = core::mem::replace( + target_descriptor, + *physical_address as u64 + | create_attributes_for_stage_2(permission, is_dummy_page, is_unmap, true), + ); + + if is_descriptor_table_or_level_3_descriptor(old_descriptor) { + let old_table = extract_output_address(old_descriptor, STAGE_2_PAGE_SHIFT); + pr_debug!("PageTable:({:#X}) will be deleted.", old_table); + let _ = free_memory(old_table, 1); + } *physical_address += 1 << shift_level; /*for i in 0..(1 << (shift_level - STAGE_2_PAGE_SHIFT)) { @@ -487,52 +508,26 @@ fn map_address_recursive_stage2( return Ok(()); } -#[allow(dead_code)] -pub fn map_dummy_page_into_vttbr_el2( - mut virtual_address: usize, - size: usize, - mut dummy_page: usize, /*4 KiB Page Physical Address*/ -) -> Result<(), ()> { - if (size & ((1usize << STAGE_2_PAGE_SHIFT) - 1)) != 0 { - println!("Size({:#X}) is not aligned.", size); - return Err(()); - } - let mut num_of_needed_pages = size >> STAGE_2_PAGE_SHIFT; - let vtcr_el2 = get_vtcr_el2(); - let vtcr_el2_sl0 
= ((vtcr_el2 & VTCR_EL2_SL0) >> VTCR_EL2_SL0_BITS_OFFSET) as u8; - let vtcr_el2_sl2 = ((vtcr_el2 & VTCR_EL2_SL2) >> VTCR_EL2_SL2_BIT_OFFSET) as u8; - let vtcr_el2_t0sz = ((vtcr_el2 & VTCR_EL2_T0SZ) >> VTCR_EL2_T0SZ_BITS_OFFSET) as u8; - let initial_look_up_level: i8 = match (vtcr_el2_sl0, vtcr_el2_sl2) { - (0b01u8, 0b0u8) => 1, - (0b10u8, 0b0u8) => 0, - (0b00u8, 0b1u8) => -1, - _ => unreachable!(), - }; - - let original_dummy_page = dummy_page; - map_address_recursive_stage2( - &mut dummy_page, - &mut virtual_address, - &mut num_of_needed_pages, - TTBR::new(get_vttbr_el2()).get_base_address(), - initial_look_up_level, - false, - (1 << MEMORY_PERMISSION_READABLE_BIT) - | (1 << MEMORY_PERMISSION_WRITABLE_BIT) - | (1 << MEMORY_PERMISSION_EXECUTABLE_BIT), - calculate_number_of_concatenated_page_tables(vtcr_el2_t0sz, initial_look_up_level), - vtcr_el2_t0sz, - true, - )?; - - assert_eq!(num_of_needed_pages, 0); - assert_eq!(original_dummy_page, dummy_page); - flush_tlb_el1(); - return Ok(()); -} - -/// VTTBR_EL2の該当アドレス範囲をトラップできるようにする。 -/// 現在はInitial Table Levelは0までのみの対応 +/// Set up to trap memory access from EL1/EL0 +/// +/// This will modify the stage2 page table to trap the access of (`address` ~ (`address` + `size`)) +/// from EL1/EL0. +/// +/// This function should be called after calling [`crate::memory_hook::add_memory_load_hook_handler`] +/// and/or [`crate::memory_hook::add_memory_store_hook_handler`]. +/// +/// # Arguments +/// * `address` - The physical address to trap +/// * `size` - The trap size +/// * `allow_read_access` - If true, read access from EL1/EL0 will not be trapped +/// * `allow_write_access` - If true, write access from EL1/EL0 will not be trapped +/// +/// # Attention +/// If call this function with (`allow_read_access` == true) && (`allow_write_access` == true), +/// it returns Err(()). If you want to remove the trap, call [`remove_memory_access_trap`] +/// +/// # Result +/// If the setting is succeeded, returns Ok(()), otherwise returns Err(()) pub fn add_memory_access_trap( mut address: usize, size: usize, @@ -582,8 +577,20 @@ pub fn add_memory_access_trap( return Ok(()); } -/// VTTBR_EL2の該当アドレス範囲をトラップできないようにする。 -/// 現在はInitial Table Levelは0までのみの対応 +/// Remove the trap of memory access from EL1/EL0 +/// +/// This will modify the stage2 page table to remove the access trap of (`address` ~ (`address` + `size`)) +/// from EL1/EL0. +/// +/// This function should be called before calling [`crate::memory_hook::remove_memory_load_hook_handler`] +/// and/or [`crate::memory_hook::remove_memory_store_hook_handler`]. 
+/// +/// # Arguments +/// * `address` - The physical address to remove trapping +/// * `size` - The trap size +/// +/// # Result +/// If the setting is succeeded, returns Ok(()), otherwise returns Err(()) pub fn remove_memory_access_trap(mut address: usize, size: usize) -> Result<(), ()> { if (size & ((1usize << STAGE_2_PAGE_SHIFT) - 1)) != 0 { println!("Size({:#X}) is not aligned.", size); return Err(()); } @@ -631,33 +638,17 @@ fn allocate_page_table_for_stage_1( t0sz: u8, is_for_ttbr: bool, ) -> Result<usize, ()> { - let table_address_alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) as u8 * (4 - look_up_level) as u8) - t0sz).max(4)).min(12) + let alignment = if is_for_ttbr { + ((64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as usize).max(4)).min(12) } else { - PAGE_SHIFT as u8 + PAGE_SHIFT }; - loop { - match allocate_memory(1) { - Ok(address) => { - if (address & ((1 << table_address_alignment) - 1)) != 0 { - pr_debug!( - "The table address is not alignment with {}, {:#X} will be wasted.", - table_address_alignment, - address - ); - /* TODO: アライメントを指定してメモリを確保できるようにし、無駄をなくす。 */ - } else { - return Ok(address); - } - } - Err(e) => { - println!( - "Failed to allocate memory for the stage 1 page table: {:?}", - e - ); - return Err(()); - } - }; + match allocate_memory(1, Some(alignment)) { + Ok(address) => Ok(address), + Err(err) => { + println!("Failed to allocate the page table: {:?}", err); + Err(()) + } } } @@ -670,37 +661,312 @@ fn allocate_page_table_for_stage_2( t0sz: u8, is_for_ttbr: bool, number_of_tables: u8, ) -> Result<usize, ()> { assert_ne!(number_of_tables, 0); - let table_address_alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) as u8 * (4 - look_up_level) as u8) - t0sz).max(4)).min(12) - + (number_of_tables - 1) + let alignment = if is_for_ttbr { + ((64 - ((PAGE_SHIFT - 3) as usize * (4 - look_up_level) as usize) - t0sz as usize).max(4)) + .min(12) + + (number_of_tables as usize - 1) } else { assert_eq!(number_of_tables, 1); - STAGE_2_PAGE_SHIFT as u8 + STAGE_2_PAGE_SHIFT }; - loop { - match allocate_memory(number_of_tables as usize) { - Ok(address) => { - if (address & ((1 << table_address_alignment) - 1)) != 0 { - pr_debug!( - "The table address is not alignment with {}, {:#X} will be wasted.", - table_address_alignment, - address + match allocate_memory(number_of_tables as usize, Some(alignment)) { + Ok(address) => Ok(address), + Err(err) => { + println!("Failed to allocate the page table: {:?}", err); + Err(()) + } + } +} + +#[allow(dead_code)] +pub fn dump_page_table_el2( + start_address: Option<usize>, + end_address: Option<usize>, + should_dump_table_only: bool, +) { + let tcr_el2 = get_tcr_el2(); + let tcr_el2_ds = + ((tcr_el2 & TCR_EL2_DS_WITHOUT_E2H) >> TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H) as u8; + let tcr_el2_tg0 = + ((tcr_el2 & TCR_EL2_TG0_WITHOUT_E2H) >> TCR_EL2_TG0_BITS_OFFSET_WITHOUT_E2H) as u8; + let tcr_el2_t0sz = + ((tcr_el2 & TCR_EL2_T0SZ_WITHOUT_E2H) >> TCR_EL2_T0SZ_BITS_OFFSET_WITHOUT_E2H) as u8; + let tcr_el2_ps = + ((tcr_el2 & TCR_EL2_PS_WITHOUT_E2H) >> TCR_EL2_PS_BITS_OFFSET_WITHOUT_E2H) as u8; + let page_shift = 12 + (tcr_el2_tg0 << 1); + let output_address_size = match tcr_el2_ps { + 0b000 => 32, + 0b001 => 36, + 0b010 => 40, + 0b011 => 42, + 0b100 => 44, + 0b101 => 48, + 0b110 => 52, + _ => unreachable!(), + }; + let paging_level = 4 - get_initial_page_table_level_and_bits_to_shift(tcr_el2).0; + let mut base_address: usize = 1 << page_shift; + let mut current_level = 3i8; + let page_table_address = TTBR::new(get_ttbr0_el2()).get_base_address(); + + if page_shift == 16 { + println!("64KiB Paging is 
not supported."); + return; + } + + if !should_dump_table_only { + println!( + "TCR_EL2: {:#b}\n DS: {}, TG0: {:#b}({} KiB), T0SZ: {}, PS: {:#b}({} bits)", + tcr_el2, + tcr_el2_ds, + tcr_el2_tg0, + (1 << page_shift) >> 10, + tcr_el2_t0sz, + tcr_el2_ps, + output_address_size, + ); + println!("PageTable: {:#X}", page_table_address); + println!( + "MAIR: {:#X}(Using MemAttr: {})", + get_mair_el2(), + get_suitable_memory_attribute_index_from_mair_el2(false) + ); + println!("Lookup: {} Level", paging_level); + println!( + " {} KiB: Level {} Descriptor", + base_address >> 10, + current_level + ); + } + for _ in 0..(paging_level - 1) { + base_address <<= 9; /*512Entry*/ + current_level -= 1; + if !should_dump_table_only { + if (base_address >> 10) < 1024 { + print!(" {} KiB", base_address >> 10) + } else if (base_address >> 20) < 1024 { + print!(" {} MiB", base_address >> 20); + } else { + print!(" {} GiB", base_address >> 30); + } + println!(": Level {} Descriptor", current_level); + } + } + + dump_page_table_recursive( + page_table_address, + start_address.unwrap_or(0), + end_address.unwrap_or(usize::MAX), + &mut 0, + paging_level as u8, + 0, + base_address, + 512, + ); +} + +#[allow(dead_code)] +pub fn dump_page_table_stage2( + start_address: Option<usize>, + end_address: Option<usize>, + should_dump_table_only: bool, +) { + let vtcr_el2 = get_vtcr_el2(); + let vtcr_el2_ps = (vtcr_el2 & VTCR_EL2_PS) >> VTCR_EL2_PS_BITS_OFFSET; + let vtcr_el2_tg0 = (vtcr_el2 & VTCR_EL2_TG0) >> VTCR_EL2_TG0_BITS_OFFSET; + let vtcr_el2_sl0 = (vtcr_el2 & VTCR_EL2_SL0) >> VTCR_EL2_SL0_BITS_OFFSET; + let vtcr_el2_t0sz = (vtcr_el2 & VTCR_EL2_T0SZ) >> VTCR_EL2_T0SZ_BITS_OFFSET; + let page_shift = match vtcr_el2_tg0 { + 0b00 => 12, + 0b01 => 16, + 0b10 => 14, + _ => unimplemented!(), + }; + let paging_level = match vtcr_el2_sl0 { + 0b00 => 2, + 0b01 => 3, + 0b10 => 4, + 0b11 => 1, + _ => unreachable!(), + }; + let output_address_size = match vtcr_el2_ps { + 0b000 => 32, + 0b001 => 36, + 0b010 => 40, + 0b011 => 42, + 0b100 => 44, + 0b101 => 48, + 0b110 => 52, + _ => unimplemented!(), + }; + let number_of_concatenated_page_tables = + calculate_number_of_concatenated_page_tables(vtcr_el2_t0sz as u8, 3 - (paging_level - 1)); + let mut base_address: usize = 1 << page_shift; + let mut current_level = 3i8; + let page_table_address = TTBR::new(get_vttbr_el2()).get_base_address(); + + if page_shift == 16 || page_shift == 14 { + println!("16/64KiB Paging is not supported."); + return; + } + + if !should_dump_table_only { + println!( + "VTCR_EL2: {:#b}\n TG0: {:#b}({} KiB), T0SZ: {}, PS: {:#b}({} bits)", + vtcr_el2, + vtcr_el2_tg0, + (1 << page_shift) >> 10, + vtcr_el2_t0sz, + vtcr_el2_ps, + output_address_size, + ); + println!("PageTable: {:#X}", page_table_address); + println!( + "Lookup: {} Level(Number of concatenated pages: {})", + paging_level, number_of_concatenated_page_tables + ); + println!( + " {} KiB: Level {} Descriptor", + base_address >> 10, + current_level + ); + } + for _ in 0..(paging_level - 1) { + base_address <<= 9; /*512Entry*/ + current_level -= 1; + if !should_dump_table_only { + if (base_address >> 10) < 1024 { + print!(" {} KiB", base_address >> 10) + } else if (base_address >> 20) < 1024 { + print!(" {} MiB", base_address >> 20); + } else { + print!(" {} GiB", base_address >> 30); + } + println!(": Level {} Descriptor", current_level); + } + } + + if !should_dump_table_only {} + + dump_page_table_recursive( + page_table_address, + start_address.unwrap_or(0), + end_address.unwrap_or(usize::MAX), + &mut 0, 
paging_level as u8, + 0, + base_address, + (number_of_concatenated_page_tables as usize) * 512, + ); +} + +fn dump_page_table_recursive( + table_address: usize, + start_virtual_address: usize, + end_virtual_address: usize, + virtual_base_address: &mut usize, + level: u8, + space_count: u8, + granule: usize, + number_of_entries: usize, +) { + let print_indent = |c: u8| { + for _ in 0..c { + print!(" "); + } + }; + let mut processing_descriptor_address = table_address; + + let should_print = |v_a: usize| -> bool { + (start_virtual_address..=end_virtual_address).contains(&v_a) + || (v_a..(v_a + granule)).contains(&start_virtual_address) + || (v_a..(v_a + granule)).contains(&end_virtual_address) + }; + + if level == 1 { + for _ in 0..number_of_entries { + let level3_descriptor = unsafe { *(processing_descriptor_address as *const u64) }; + let should_print = should_print(*virtual_base_address); + if should_print { + print_indent(space_count); + if (level3_descriptor & 0b1) == 0 { + println!( + "{:#X} ~ {:#X}: Invalid", + virtual_base_address, + *virtual_base_address + granule + ); + } else if (level3_descriptor & 0b10) == 0 { + println!( + "{:#X} ~ {:#X}: Reserved", + virtual_base_address, + *virtual_base_address + granule ); - /* TODO: アライメントを指定してメモリを確保できるようにし、無駄をなくす。 */ - if number_of_tables != 1 { - let _ = allocate_memory(1); - } } else { - return Ok(address); + } else { + println!( + "{:#X} ~ {:#X}: {:#b}(OA: {:#X}, MemAttr: {})", + virtual_base_address, + *virtual_base_address + granule, + level3_descriptor, + extract_output_address(level3_descriptor, PAGE_SHIFT), + (level3_descriptor >> 2) & 0b111 + ); + } } - Err(e) => { - println!( - "Failed to allocate memory for the stage 2 page table: {:?}", - e + *virtual_base_address += granule; + processing_descriptor_address += core::mem::size_of::<u64>(); + } + } else { + for _ in 0..number_of_entries { + let descriptor = unsafe { *(processing_descriptor_address as *const u64) }; + let should_print = should_print(*virtual_base_address); + if should_print { + print_indent(space_count); + } + if (descriptor & 0b1) == 0 { + if should_print { + println!( + "{:#X} ~ {:#X}: Invalid", + virtual_base_address, + *virtual_base_address + granule + ); + } + *virtual_base_address += granule; + } else if (descriptor & 0b10) == 0 { + // Block Descriptor + if should_print { + println!( + "{:#X} ~ {:#X}: Block: {:#b} (OA: {:#X}, MemAttr: {})", + virtual_base_address, + *virtual_base_address + granule, + descriptor, + extract_output_address(descriptor, PAGE_SHIFT), + (descriptor >> 2) & 0b111 + ); + } + *virtual_base_address += granule; + } else { + let next_level_table = extract_output_address(descriptor, PAGE_SHIFT); + if should_print { + println!( + "{:#X} ~ {:#X}: Table: {:#b} (OA: {:#X})", + virtual_base_address, + *virtual_base_address + granule, + descriptor, + extract_output_address(descriptor, PAGE_SHIFT) + ); + } + dump_page_table_recursive( + next_level_table, + start_virtual_address, + end_virtual_address, + virtual_base_address, + level - 1, + space_count + 2, + granule >> 9, + 512, ); - return Err(()); } - }; + processing_descriptor_address += core::mem::size_of::<u64>(); + } } } diff --git a/src/hypervisor_kernel/src/pci.rs b/src/hypervisor_kernel/src/pci.rs index f0bc32d..463c55c 100644 --- a/src/hypervisor_kernel/src/pci.rs +++ b/src/hypervisor_kernel/src/pci.rs @@ -24,7 +24,7 @@ pub fn init_pci(ecam_address: usize, start_bus_number: u8, end_bus_number: u8) { "{:X}:{:X} VenderId: {:#X}, DeviceId: {:#X}", bus, device, vendor_id, device_id ); - /* TODO: 動的にハンドラ呼び出し */ + 
diff --git a/src/hypervisor_kernel/src/pci.rs b/src/hypervisor_kernel/src/pci.rs
index f0bc32d..463c55c 100644
--- a/src/hypervisor_kernel/src/pci.rs
+++ b/src/hypervisor_kernel/src/pci.rs
@@ -24,7 +24,7 @@ pub fn init_pci(ecam_address: usize, start_bus_number: u8, end_bus_number: u8) {
         "{:X}:{:X} VenderId: {:#X}, DeviceId: {:#X}",
         bus, device, vendor_id, device_id
     );
-    /* TODO: 動的にハンドラ呼び出し */
+    /* TODO: call handlers dynamically */
     if vendor_id == drivers::i210::VENDOR_ID && device_id == drivers::i210::DEVICE_ID {
         #[cfg(feature = "i210")]
         drivers::i210::setup_device(ecam_address, bus, device, 0);
@@ -76,11 +76,11 @@ pub fn get_configuration_space_data(
     let data = unsafe { *((address + aligned_offset) as *const u32) };
     let byte_offset = (offset & 0b11) as u8;
     assert!(byte_offset + size <= 4);
-    return if size == 4 {
+    if size == 4 {
         data
     } else {
         (data >> (byte_offset << 3)) & ((1 << (size << 3)) - 1)
-    };
+    }
 }
 
 pub fn get_ecam_target_address(base_address: usize, bus: u8, device: u8, function: u8) -> usize {
diff --git a/src/hypervisor_kernel/src/psci.rs b/src/hypervisor_kernel/src/psci.rs
index 9fcca0d..ac38554 100644
--- a/src/hypervisor_kernel/src/psci.rs
+++ b/src/hypervisor_kernel/src/psci.rs
@@ -11,10 +11,10 @@
 //! Supported Version: ~2.0
 
 use crate::fast_restore::enter_restore_process;
-use crate::multi_core::setup_new_cpu;
-use crate::StoredRegisters;
+use crate::multi_core::{power_off_cpu, setup_new_cpu};
+use crate::{handler_panic, StoredRegisters};
 
-use common::cpu::secure_monitor_call;
+use common::cpu::{get_mpidr_el1, secure_monitor_call};
 
 /// PSCI Function ID List
 ///
@@ -121,6 +121,15 @@ pub fn handle_psci_call(function_id: PsciFunctionId, stored_registers: &mut Stor
     if function_id == PsciFunctionId::CpuOn {
         pr_debug!("CPU ON: MPIDR: {:#X}", stored_registers.x1);
         setup_new_cpu(stored_registers);
+    } else if function_id == PsciFunctionId::CpuOff {
+        /* PSCI CPU_OFF does not return on success, so reaching the panic below means the power-off failed */
+        let result = power_off_cpu();
+        handler_panic!(
+            stored_registers,
+            "Failed to power off the cpu (MPIDR: {:#X}): {:?}",
+            get_mpidr_el1(),
+            PsciReturnCode::try_from(result)
+        );
     } else {
         #[cfg(feature = "fast_restore")]
         if function_id == PsciFunctionId::SystemOff
diff --git a/src/hypervisor_kernel/src/serial_port.rs b/src/hypervisor_kernel/src/serial_port.rs
index 8e010fa..bc8e2ab 100644
--- a/src/hypervisor_kernel/src/serial_port.rs
+++ b/src/hypervisor_kernel/src/serial_port.rs
@@ -14,10 +14,9 @@ use arm_sbsa_generic_uart::SerialSbsaUart;
 use meson_gx_uart::SerialMesonGxUart;
 
 use common::serial_port::{SerialPortInfo, SerialPortType};
+use common::spin_flag::SpinLockFlag;
 
 use core::fmt;
-use core::fmt::Write;
-use core::sync::atomic::{AtomicBool, Ordering};
 
 trait SerialPortDevice {
     fn new(address: usize) -> Self;
@@ -55,7 +54,7 @@ impl SerialPortDevice for Device {
 
 pub struct SerialPort {
     device: Device,
-    write_lock: AtomicBool,
+    write_lock: SpinLockFlag,
 }
 
 impl SerialPort {
@@ -72,36 +71,18 @@ impl SerialPort {
                 Device::MesonGxUart(SerialMesonGxUart::new(info.virtual_address))
             }
         },
-            write_lock: AtomicBool::new(false),
+            write_lock: SpinLockFlag::new(),
         }
     }
 
-    fn acquire_write_lock(&self) {
-        loop {
-            if self
-                .write_lock
-                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
-                .is_ok()
-            {
-                return;
-            }
-            while self.write_lock.load(Ordering::Relaxed) {
-                core::hint::spin_loop();
-            }
-        }
-    }
-
-    fn release_write_lock(&self) {
-        self.write_lock.store(false, Ordering::Release)
-    }
-
-    fn wait_fifo(&mut self) -> core::fmt::Result {
+    fn wait_fifo(&mut self) -> fmt::Result {
+        assert!(self.write_lock.is_locked());
         let mut timeout = 0xFFFFusize;
         while self.device.is_write_fifo_full() {
             timeout -= 1;
             if timeout == 0 {
-                self.release_write_lock();
-                return Err(core::fmt::Error);
+                self.write_lock.unlock();
+                return Err(fmt::Error);
             }
             core::hint::spin_loop();
         }
@@ -110,7 +91,7 @@ impl SerialPort {
 
     /// For panic_handler
     pub unsafe fn force_release_write_lock(&self) {
-        self.release_write_lock();
+        self.write_lock.unlock();
     }
 }
 
@@ -118,25 +99,25 @@ pub unsafe fn init_default_serial_port(info: SerialPortInfo) {
     DEFAULT_SERIAL_PORT = Some(SerialPort::new(info));
 }
 
-impl Write for SerialPort {
-    fn write_str(&mut self, s: &str) -> core::fmt::Result {
-        self.acquire_write_lock();
+impl fmt::Write for SerialPort {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        self.write_lock.lock();
         for c in s.as_bytes() {
             self.wait_fifo()?;
             if *c == b'\n' {
                 let result = self.device.write_char(b'\r');
                 if result.is_err() {
-                    self.release_write_lock();
+                    self.write_lock.unlock();
                     return Err(fmt::Error);
                 }
                 self.wait_fifo()?;
             }
-            if let Err(_) = self.device.write_char(*c) {
-                self.release_write_lock();
+            if self.device.write_char(*c).is_err() {
+                self.write_lock.unlock();
                 return Err(fmt::Error);
             }
         }
-        self.release_write_lock();
+        self.write_lock.unlock();
         return Ok(());
     }
 }
@@ -145,6 +126,7 @@
 pub(super) static mut DEFAULT_SERIAL_PORT: Option<SerialPort> = None;
 
 pub fn print(args: fmt::Arguments) {
     if let Some(s) = unsafe { &mut DEFAULT_SERIAL_PORT } {
+        use fmt::Write;
         let _ = s.write_fmt(args);
     }
 }
@@ -158,8 +140,8 @@ macro_rules! print {
 
 #[macro_export]
 macro_rules! println {
-    ($fmt:expr) => ($crate::serial_port::print(format_args_nl!($fmt)));
-    ($fmt:expr, $($arg:tt)*) => ($crate::serial_port::print(format_args_nl!($fmt, $($arg)*)))
+    ($fmt:expr) => ($crate::serial_port::print(format_args!("{}\n", format_args!($fmt))));
+    ($fmt:expr, $($arg:tt)*) => ($crate::serial_port::print(format_args!("{}\n", format_args!($fmt, $($arg)*))));
 }
 
 #[cfg(debug_assertions)]
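
The serial driver now delegates locking to `common::spin_flag::SpinLockFlag`, whose implementation is not part of this patch. A minimal sketch of the contract the driver relies on (`new`/`lock`/`unlock`/`is_locked`), assuming it wraps the same `AtomicBool` pattern the deleted `acquire_write_lock` used:

// Sketch of the assumed SpinLockFlag contract; not the actual common crate code.
use core::sync::atomic::{AtomicBool, Ordering};

pub struct SpinLockFlag(AtomicBool);

impl SpinLockFlag {
    pub const fn new() -> Self {
        Self(AtomicBool::new(false))
    }

    /// Spin until the flag is acquired (Acquire ordering on success).
    pub fn lock(&self) {
        loop {
            if self
                .0
                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return;
            }
            while self.0.load(Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }
    }

    /// Release the flag (Release ordering) so other cores may acquire it.
    pub fn unlock(&self) {
        self.0.store(false, Ordering::Release);
    }

    /// Non-blocking check, used by wait_fifo()'s debug assertion.
    pub fn is_locked(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }
}
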
diff --git a/src/hypervisor_kernel/src/smmu.rs b/src/hypervisor_kernel/src/smmu.rs
index b482507..c2bc610 100644
--- a/src/hypervisor_kernel/src/smmu.rs
+++ b/src/hypervisor_kernel/src/smmu.rs
@@ -1,5 +1,5 @@
 // Copyright (c) 2022 RIKEN
-// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) s
+// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST)
 // All rights reserved.
 //
 // This software is released under the MIT License.
@@ -9,73 +9,121 @@
 //! System Memory Management Unit
 //!
 
-use crate::memory_hook::{
-    add_memory_load_hook_handler, add_memory_store_hook_handler, LoadAccessHandlerEntry,
-    LoadHookResult, StoreAccessHandlerEntry, StoreHookResult,
-};
-use crate::paging::add_memory_access_trap;
-use crate::StoredRegisters;
+use crate::memory_hook::*;
+use crate::paging::{add_memory_access_trap, map_address, remove_memory_access_trap};
+use crate::{emulation, StoredRegisters};
 
-use common::smmu::{SMMU_IDR0, SMMU_IDR0_HYP, SMMU_IDR0_S1P, SMMU_IDR0_S2P, SMMU_MEMORY_MAP_SIZE};
-use common::{STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE};
+use common::cpu::{dsb, get_vtcr_el2, get_vttbr_el2};
+use common::paging::{page_align_up, stage2_page_align_up};
+use common::smmu::*;
+use common::{bitmask, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE};
+
+use core::mem::size_of;
 
 static mut SMMU_BASE_ADDRESS: usize = 0;
 
-/// Configures protection of the SMMU region
+fn read_smmu_register<T>(offset: usize) -> T {
+    assert!(offset < SMMU_MEMORY_MAP_SIZE);
+    dsb();
+    unsafe { core::ptr::read_volatile((SMMU_BASE_ADDRESS + offset) as *const T) }
+}
+
+fn write_smmu_register<T>(offset: usize, data: T) {
+    assert!(offset < SMMU_MEMORY_MAP_SIZE);
+    unsafe { core::ptr::write_volatile((SMMU_BASE_ADDRESS + offset) as *mut T, data) }
+    dsb();
+}
+
+/// # ATTENTION
+/// If you add a member to `SmmuSavedRegisters`,
+/// please modify [`backup_default_smmu_settings`] and [`restore_smmu_status`] as well.
+struct SmmuSavedRegisters {
+    cr0: u32,
+    cr1: u32,
+    cr2: u32,
+    gbpa: u32,
+    agbpa: u32,
+    irq_ctrl: u32,
+    gerrorn: u32,
+    strtab_base: u64,
+    strtab_base_cfg: u32,
+    gatos_ctrl: u32,
+}
+
+impl SmmuSavedRegisters {
+    const fn new() -> Self {
+        Self {
+            cr0: 0,
+            cr1: 0,
+            cr2: 0,
+            gbpa: 0,
+            agbpa: 0,
+            irq_ctrl: 0,
+            gerrorn: 0,
+            strtab_base: 0,
+            strtab_base_cfg: 0,
+            gatos_ctrl: 0,
+        }
+    }
+}
+
+static mut DEFAULT_SMMU_STATUS: SmmuSavedRegisters = SmmuSavedRegisters::new();
+static mut CURRENT_SMMU_STATUS: SmmuSavedRegisters = SmmuSavedRegisters::new();
+
+/// Set up the SMMU registers and their mapping.
 ///
-/// Makes the SMMU MMIO region inaccessible from EL1.
-/// Also makes the SMMU look to EL1 as if Stage 1 & 2 were unusable.
-/// If an IORT address is passed, the IORT entries appear zero-cleared from EL1.
+/// This function sets up a trap on accesses to the SMMU registers from EL1/EL0.
+/// EL1 will see an SMMU that supports only Stage 1 translation.
 ///
-/// # Arguments
-/// base_address: the base address of the SMMU
-/// iort_address: the address of the IORT entry (optional)
+/// # Panics
+/// If adding a memory access handler fails, this function panics.
 ///
-pub fn init_smmu(base_address: usize, iort_address: Option<usize>) {
-    /* base_address must be mapped, accessible, and enabled. */
-    unsafe { SMMU_BASE_ADDRESS = base_address };
+/// # Arguments
+/// * `smmu_registers_base_address` - The base address of the SMMU registers ([`common::smmu::SMMU_MEMORY_MAP_SIZE`] bytes must be mapped and accessible)
+/// * `iort_address` - The address of the IORT (optional)
+pub fn init_smmu(smmu_registers_base_address: usize, _iort_address: Option<usize>) {
+    /* smmu_registers_base_address must be mapped, accessible, and enabled. */
+    unsafe { SMMU_BASE_ADDRESS = smmu_registers_base_address };
+
+    backup_default_smmu_settings();
 
-    add_memory_access_trap(base_address, SMMU_MEMORY_MAP_SIZE, false, false)
-        .expect("Failed to trap the memory access to SMMU");
+    add_memory_access_trap(
+        smmu_registers_base_address,
+        SMMU_MEMORY_MAP_SIZE,
+        false,
+        false,
+    )
+    .expect("Failed to trap the memory access to SMMU");
     add_memory_load_hook_handler(LoadAccessHandlerEntry::new(
-        base_address,
+        smmu_registers_base_address,
         SMMU_MEMORY_MAP_SIZE,
         smmu_registers_load_handler,
     ))
     .expect("Failed to add the load handler");
     add_memory_store_hook_handler(StoreAccessHandlerEntry::new(
-        base_address,
+        smmu_registers_base_address,
         SMMU_MEMORY_MAP_SIZE,
         smmu_registers_store_handler,
     ))
     .expect("Failed to add the store handler");
+}
 
-    if let Some(iort_address) = iort_address {
-        let iort_length = unsafe { *((iort_address + 4) as *const u32) } as usize;
-        let aligned_iort_address = iort_address & STAGE_2_PAGE_MASK;
-        let aligned_iort_size = (((iort_length + (iort_address - aligned_iort_address)) - 1)
-            & STAGE_2_PAGE_MASK)
-            + STAGE_2_PAGE_SIZE;
-        add_memory_access_trap(aligned_iort_address, aligned_iort_size, false, false)
-            .expect("Failed to trap the IORT area.");
-        add_memory_load_hook_handler(LoadAccessHandlerEntry::new(
-            iort_address,
-            iort_length,
-            iort_load_handler,
-        ))
-        .expect("Failed to add the load handler");
-        add_memory_store_hook_handler(StoreAccessHandlerEntry::new(
-            iort_address,
-            iort_length,
-            crate::acpi_protect::acpi_table_store_handler,
-        ))
-        .expect("Failed to add the store handler");
-        println!(
-            "Delete IORT(Address: {:#X}, Size: {:#X}) from EL1.",
-            iort_address, iort_length
-        );
-    }
+fn backup_default_smmu_settings() {
+    let default_smmu_settings = SmmuSavedRegisters {
+        cr0: read_smmu_register(SMMU_CR0),
+        cr1: read_smmu_register(SMMU_CR1),
+        cr2: read_smmu_register(SMMU_CR2),
+        gbpa: read_smmu_register(SMMU_GBPA),
+        agbpa: read_smmu_register(SMMU_AGBPA),
+        irq_ctrl: read_smmu_register(SMMU_IRQ_CTRL),
+        gerrorn: read_smmu_register(SMMU_GERRORN),
+        strtab_base: read_smmu_register(SMMU_STRTAB_BASE),
+        strtab_base_cfg: read_smmu_register(SMMU_STRTAB_BASE_CFG),
+        gatos_ctrl: read_smmu_register(SMMU_GATOS_CTRL),
+    };
+
+    unsafe { DEFAULT_SMMU_STATUS = default_smmu_settings };
 }
 
 fn smmu_registers_load_handler(
@@ -86,13 +134,35 @@ fn smmu_registers_load_handler(
     _is_sign_extend_required: bool,
 ) -> Result<LoadHookResult, ()> {
     let register_offset = accessing_memory_address - unsafe { SMMU_BASE_ADDRESS };
-    println!("SMMU Load Access Handler: Offset: {:#X}", register_offset);
+    pr_debug!("SMMU Load Access Handler: Offset: {:#X}", register_offset);
     match register_offset {
-        SMMU_IDR0 => {
-            println!("SMMU_IDR0");
+        SMMU_IDR0 => Ok(LoadHookResult::Data(
+            (read_smmu_register::<u32>(SMMU_IDR0)
+                & (!(SMMU_IDR0_S2P
+                    | SMMU_IDR0_HYP
+                    | SMMU_IDR0_CD2L
+                    | SMMU_IDR0_VMID16
+                    | SMMU_IDR0_VATOS))) as u64,
+        )),
+        SMMU_IDR2 => Ok(LoadHookResult::Data(0)),
+        SMMU_CR0 | SMMU_CR0ACK => Ok(LoadHookResult::Data(
+            unsafe { CURRENT_SMMU_STATUS.cr0 } as u64
+        )),
+        SMMU_CR1 => Ok(LoadHookResult::Data(
+            unsafe { CURRENT_SMMU_STATUS.cr1 } as u64
+        )),
+        SMMU_CR2 => Ok(LoadHookResult::Data(
+            unsafe { CURRENT_SMMU_STATUS.cr2 } as u64
+        )),
+        SMMU_STRTAB_BASE => Ok(LoadHookResult::Data(unsafe {
+            CURRENT_SMMU_STATUS.strtab_base
+        })),
+        SMMU_STRTAB_BASE_HIGH => Ok(LoadHookResult::Data(
+            unsafe { CURRENT_SMMU_STATUS.strtab_base } >> 32,
+        )),
+        SMMU_STRTAB_BASE_CFG => {
             Ok(LoadHookResult::Data(
-                (unsafe { *(accessing_memory_address as *const u32) }
-                    & (!(SMMU_IDR0_S2P | SMMU_IDR0_S1P | SMMU_IDR0_HYP))) as u64,
+                unsafe { CURRENT_SMMU_STATUS.strtab_base_cfg } as u64,
             ))
         }
         _ => Ok(LoadHookResult::PassThrough),
@@ -100,21 +170,682 @@
 }
 
 fn smmu_registers_store_handler(
-    _accessing_memory_address: usize,
+    accessing_memory_address: usize,
     _stored_registers: &mut StoredRegisters,
-    _access_size: u8,
-    _data: u64,
+    access_size: u8,
+    data: u64,
+) -> Result<StoreHookResult, ()> {
+    let register_offset = accessing_memory_address - unsafe { SMMU_BASE_ADDRESS };
+    pr_debug!(
+        "SMMU Store Access Handler: Offset: {:#X}, Data: {:#X}",
+        register_offset,
+        data
+    );
+    if access_size != 0b10
+        && (register_offset != SMMU_STRTAB_BASE
+            && register_offset != SMMU_GERROR_IRQ_CFG0
+            && register_offset != SMMU_CMDQ_BASE
+            && register_offset != SMMU_EVENTQ_BASE
+            && register_offset != SMMU_EVENTQ_IRQ_CFG0
+            && register_offset != SMMU_PRIQ_BASE
+            && register_offset != SMMU_PRIQ_IRQ_CFG0
+            && register_offset != SMMU_GATOS_SID
+            && register_offset != SMMU_GATOS_ADDR
+            && register_offset != SMMU_GATOS_PAR
+            && !(SMMU_CMDQ_CONTROL_PAGE_BASE..=SMMU_CMDQ_CONTROL_PAGE_BASE_END)
+                .contains(&register_offset)
+            /*&& register_offset != (vatos_offset + SMMU_VATOS_SID)
+            && register_offset != (vatos_offset + SMMU_VATOS_ADDR)
+            && register_offset != (vatos_offset + SMMU_VATOS_PAR)*/)
+    {
+        println!("Invalid access size: {:#X}", access_size);
+        return Ok(StoreHookResult::Cancel);
+    }
+
+    match register_offset {
+        SMMU_CR0 => {
+            let old_smmu_en = (unsafe { CURRENT_SMMU_STATUS.cr0 } & SMMU_CR0_SMMUEN) != 0;
+            let new_smmu_en = ((data as u32) & SMMU_CR0_SMMUEN) != 0;
+            pr_debug!(
+                "SMMU_CR0: {:#X}(SMMUEN: {} => {})",
+                data,
+                old_smmu_en,
+                new_smmu_en
+            );
+            if old_smmu_en == new_smmu_en {
+                /* Compare EVENTQEN against the previously saved CR0 before overwriting it */
+                if (unsafe { CURRENT_SMMU_STATUS.cr0 } & SMMU_CR0_EVENTQEN) == 0
+                    && ((data as u32) & SMMU_CR0_EVENTQEN) != 0
+                {
+                    let mask = SMMU_CR1_QUEUE_IC | SMMU_CR1_QUEUE_OC | SMMU_CR1_QUEUE_SH;
+                    write_smmu_register(
+                        SMMU_CR1,
+                        ((data as u32) & mask) | (read_smmu_register::<u32>(SMMU_CR1) & !mask),
+                    );
+                }
+                unsafe { CURRENT_SMMU_STATUS.cr0 = data as u32 };
+                return Ok(StoreHookResult::AlternativeData(
+                    data | (SMMU_CR0_SMMUEN as u64),
+                ));
+            }
+            if !new_smmu_en {
+                /* Check the SMMU_GBPA status */
+                while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+                    core::hint::spin_loop();
+                }
+                if (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_ABORT) != 0 {
+                    /* Disable SMMUEN */
+                    disable_smmu(old_smmu_en, true);
+                    unsafe { CURRENT_SMMU_STATUS.cr0 = data as u32 };
+                    return Ok(StoreHookResult::PassThrough);
+                }
+                set_default_smmu_settings(old_smmu_en, true, Some(data as u32));
+            } else {
+                apply_current_smmu_settings(Some(data as u32));
+            }
+            /* Set CR0 (SMMU_CR0ACK will return the new value) */
+            unsafe { CURRENT_SMMU_STATUS.cr0 = data as u32 };
+            Ok(StoreHookResult::Cancel)
+        }
+        SMMU_GBPA => {
+            let data = data as u32;
+            if (data & SMMU_GBPA_UPDATE) != 0 {
+                if (data & SMMU_GBPA_ABORT) == 0
+                    && ((read_smmu_register::<u32>(SMMU_CR0) & SMMU_CR0_SMMUEN) == 0)
+                {
+                    /* If Abort is cleared while SMMUEN is disabled, all translations would be
+                    bypassed. To avoid that, apply the default SMMU settings first. */
+                    set_default_smmu_settings(false, false, None);
+                } else if (data & SMMU_GBPA_ABORT) != 0
+                    && ((read_smmu_register::<u32>(SMMU_CR0) & SMMU_CR0_SMMUEN) != 0)
+                {
+                    /*
+                    If Abort is set while SMMUEN is enabled, translations would no longer be
+                    bypassed. To avoid that, disable the SMMU.
+                    */
+
+                    /* To avoid bypassing translations while the SMMU is being disabled, write Abort first. */
+                    write_smmu_register(SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
+                    while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+                        core::hint::spin_loop();
+                    }
+                    disable_smmu(false, false);
+                }
+            }
+            Ok(StoreHookResult::PassThrough)
+        }
+        SMMU_CR1 => {
+            if (unsafe { CURRENT_SMMU_STATUS.cr0 } & SMMU_CR0_SMMUEN) == 0 {
+                unsafe { CURRENT_SMMU_STATUS.cr1 = data as u32 }
+            }
+            Ok(StoreHookResult::Cancel)
+        }
+        SMMU_CR2 => {
+            if (unsafe { CURRENT_SMMU_STATUS.cr0 } & SMMU_CR0_SMMUEN) == 0 {
+                unsafe { CURRENT_SMMU_STATUS.cr2 = (data as u32) & !SMMU_CR2_E2H }
+            }
+            Ok(StoreHookResult::Cancel)
+        }
+        SMMU_STRTAB_BASE => {
+            if (unsafe { CURRENT_SMMU_STATUS.cr0 } & SMMU_CR0_SMMUEN) == 0 {
+                if access_size != 0b11 {
+                    /* Store the lower 32 bits */
+                    unsafe {
+                        CURRENT_SMMU_STATUS.strtab_base =
+                            (CURRENT_SMMU_STATUS.strtab_base & !(u32::MAX as u64)) | data
+                    };
+                } else {
+                    unsafe { CURRENT_SMMU_STATUS.strtab_base = data };
+                }
+            }
+            Ok(StoreHookResult::Cancel)
+        }
+        SMMU_STRTAB_BASE_HIGH => {
+            if (unsafe { CURRENT_SMMU_STATUS.cr0 } & SMMU_CR0_SMMUEN) == 0 {
+                unsafe {
+                    CURRENT_SMMU_STATUS.strtab_base =
+                        (data << 32) | (CURRENT_SMMU_STATUS.strtab_base & u32::MAX as u64)
+                }
+            }
+            Ok(StoreHookResult::Cancel)
+        }
+        SMMU_STRTAB_BASE_CFG => {
+            if (unsafe { CURRENT_SMMU_STATUS.cr0 } & SMMU_CR0_SMMUEN) == 0 {
+                unsafe { CURRENT_SMMU_STATUS.strtab_base_cfg = data as u32 }
+            }
+            Ok(StoreHookResult::Cancel)
+        }
+        _ => Ok(StoreHookResult::PassThrough),
+    }
+}
+
+fn remove_current_stream_table_traps() {
+    assert_ne!(
+        unsafe { CURRENT_SMMU_STATUS.strtab_base } & SMMU_STRTAB_BASE_ADDRESS,
+        unsafe { DEFAULT_SMMU_STATUS.strtab_base } & SMMU_STRTAB_BASE_ADDRESS
+    );
+
+    let smmu_status = unsafe { &CURRENT_SMMU_STATUS };
+    let split = (smmu_status.strtab_base_cfg & SMMU_STRTAB_BASE_CFG_SPLIT)
+        >> SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET;
+    let log2_size = (smmu_status.strtab_base_cfg & SMMU_STRTAB_BASE_CFG_LOG2SIZE)
+        >> SMMU_STRTAB_BASE_CFG_LOG2SIZE_BITS_OFFSET;
+    let table_base_address = (smmu_status.strtab_base & SMMU_STRTAB_BASE_ADDRESS) as usize;
+    let split = if split != 6 && split != 8 && split != 10 {
+        6
+    } else {
+        split
+    };
+    let level1_table_size = get_level1_table_size(log2_size, split);
+
+    remove_memory_access_trap(table_base_address, stage2_page_align_up(level1_table_size))
+        .expect("Failed to remove trap of SMMU table");
+    remove_memory_store_hook_handler(StoreAccessHandlerEntry::new(
+        table_base_address,
+        level1_table_size,
+        level1_table_store_handler,
+    ))
+    .expect("Failed to remove store handler");
+
+    for i in 0..(level1_table_size / size_of::<u64>()) {
+        remove_trap_of_level1_entry(
+            unsafe { *((table_base_address + (i * size_of::<u64>())) as *const u64) },
+            split,
+        );
+    }
+    /*unmap_address(table_base_address, page_align_up(level1_table_size))
+    .expect("Failed to unmap address");*/
+}
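
`get_level1_table_size` and `get_level2_table_size` come from `common::smmu` and are not shown in this patch. A sketch consistent with how the code above and below uses them, assuming the SMMUv3 two-level stream-table geometry (one 64-bit L1 descriptor per 2^split StreamIDs, 64-byte STEs, Span = n covering 2^(n-1) STEs):

// Assumed shapes, for illustration only; not the actual common crate source.
const fn get_level1_table_size(log2size: u32, split: u32) -> usize {
    // 2^(log2size - split) L1 descriptors, 8 bytes each.
    (1usize << (log2size - split)) * core::mem::size_of::<u64>()
}

const fn get_level2_table_size(span: u64, _split: u32) -> usize {
    // An L1 descriptor with Span = n points to a table of 2^(n-1) STEs,
    // each 64 bytes in SMMUv3.
    (1usize << (span - 1)) * 64
}
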
+
+fn remove_trap_of_level1_entry(entry: u64, split: u32) {
+    let span = entry & bitmask!(4, 0);
+    if span == 0 || span > 12 {
+        return;
+    }
+
+    let level2_table_address = (entry & bitmask!(51, 5 + (span as usize/* -1 + 1*/))) as usize;
+    let level2_table_size = get_level2_table_size(span, split);
+    remove_memory_access_trap(
+        level2_table_address,
+        stage2_page_align_up(level2_table_size),
+    )
+    .expect("Failed to remove trap of SMMU table");
+    remove_memory_load_hook_handler(LoadAccessHandlerEntry::new(
+        level2_table_address,
+        level2_table_size,
+        level2_table_load_handler,
+    ))
+    .expect("Failed to remove load handler");
+    remove_memory_store_hook_handler(StoreAccessHandlerEntry::new(
+        level2_table_address,
+        level2_table_size,
+        level2_table_store_handler,
+    ))
+    .expect("Failed to remove store handler");
+    /*unmap_address(level2_table_address, page_align_up(level2_table_size))
+    .expect("Failed to unmap address");*/
+}
+
+fn disable_smmu(should_remove_current_trap: bool, should_apply_current_smmu_settings: bool) {
+    if should_apply_current_smmu_settings {
+        write_smmu_register(SMMU_CR1, unsafe { CURRENT_SMMU_STATUS.cr1 });
+        write_smmu_register(SMMU_CR2, unsafe { CURRENT_SMMU_STATUS.cr2 });
+    }
+    write_smmu_register(
+        SMMU_CR0,
+        read_smmu_register::<u32>(SMMU_CR0) & !SMMU_CR0_SMMUEN,
+    );
+
+    while (read_smmu_register::<u32>(SMMU_CR0ACK) & SMMU_CR0_SMMUEN) != 0 {
+        core::hint::spin_loop();
+    }
+
+    if should_remove_current_trap {
+        remove_current_stream_table_traps();
+    }
+}
+
+fn set_default_smmu_settings(
+    should_remove_current_trap: bool,
+    should_apply_current_smmu_settings: bool,
+    new_smmu_cr0: Option<u32>,
+) {
+    /* To avoid bypassing translations while the SMMU is being disabled, write Abort first. */
+    while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+        core::hint::spin_loop();
+    }
+    let default_gbpa: u32 = read_smmu_register(SMMU_GBPA);
+    write_smmu_register(SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
+    while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+        core::hint::spin_loop();
+    }
+
+    /* Disable SMMUEN */
+    write_smmu_register(
+        SMMU_CR0,
+        read_smmu_register::<u32>(SMMU_CR0) & !SMMU_CR0_SMMUEN,
+    );
+    while (read_smmu_register::<u32>(SMMU_CR0ACK) & SMMU_CR0_SMMUEN) != 0 {
+        core::hint::spin_loop();
+    }
+
+    /* Set the default values */
+    write_smmu_register(SMMU_STRTAB_BASE_CFG, unsafe {
+        DEFAULT_SMMU_STATUS.strtab_base_cfg
+    });
+    write_smmu_register(SMMU_STRTAB_BASE, unsafe { DEFAULT_SMMU_STATUS.strtab_base });
+
+    if should_apply_current_smmu_settings {
+        write_smmu_register(SMMU_CR1, unsafe { CURRENT_SMMU_STATUS.cr1 });
+        write_smmu_register(SMMU_CR2, unsafe { CURRENT_SMMU_STATUS.cr2 });
+        write_smmu_register(
+            SMMU_CR0,
+            new_smmu_cr0.unwrap_or(unsafe { CURRENT_SMMU_STATUS.cr0 }),
+        );
+    }
+
+    /* Enable SMMUEN */
+    write_smmu_register(
+        SMMU_CR0,
+        read_smmu_register::<u32>(SMMU_CR0) | SMMU_CR0_SMMUEN,
+    );
+    while (read_smmu_register::<u32>(SMMU_CR0ACK) & SMMU_CR0_SMMUEN) == 0 {
+        core::hint::spin_loop();
+    }
+
+    /* Restore GBPA */
+    write_smmu_register(SMMU_GBPA, default_gbpa | SMMU_GBPA_UPDATE);
+
+    if should_remove_current_trap {
+        remove_current_stream_table_traps()
+    }
+}
+
+fn apply_current_smmu_settings(new_smmu_cr0: Option<u32>) {
+    /* To avoid bypassing translations while the SMMU is being disabled, write Abort first. */
+    while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+        core::hint::spin_loop();
+    }
+    let default_gbpa: u32 = read_smmu_register(SMMU_GBPA);
+    write_smmu_register(SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
+    while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+        core::hint::spin_loop();
+    }
+
+    /* Disable SMMUEN */
+    write_smmu_register(
+        SMMU_CR0,
+        read_smmu_register::<u32>(SMMU_CR0) & !SMMU_CR0_SMMUEN,
+    );
+    while (read_smmu_register::<u32>(SMMU_CR0ACK) & SMMU_CR0_SMMUEN) != 0 {
+        core::hint::spin_loop();
+    }
+
+    /* Set the guest's current values */
+    write_smmu_register(SMMU_STRTAB_BASE_CFG, unsafe {
+        CURRENT_SMMU_STATUS.strtab_base_cfg
+    });
+    write_smmu_register(SMMU_STRTAB_BASE, unsafe { CURRENT_SMMU_STATUS.strtab_base });
+    write_smmu_register(SMMU_CR1, unsafe { CURRENT_SMMU_STATUS.cr1 });
+    write_smmu_register(SMMU_CR2, unsafe { CURRENT_SMMU_STATUS.cr2 });
+    /* Analyze the new settings */
+    add_trap_of_current_stream_table();
+
+    write_smmu_register(
+        SMMU_CR0,
+        new_smmu_cr0.unwrap_or(unsafe { CURRENT_SMMU_STATUS.cr0 }),
+    );
+    while (read_smmu_register::<u32>(SMMU_CR0ACK) & SMMU_CR0_SMMUEN) == 0 {
+        core::hint::spin_loop();
+    }
+
+    /* Restore GBPA */
+    write_smmu_register(SMMU_GBPA, default_gbpa | SMMU_GBPA_UPDATE);
+}
+
+fn add_trap_of_current_stream_table() {
+    let smmu_status = unsafe { &CURRENT_SMMU_STATUS };
+    let fmt = (smmu_status.strtab_base_cfg & SMMU_STRTAB_BASE_CFG_FMT)
+        >> SMMU_STRTAB_BASE_CFG_FMT_BITS_OFFSET;
+    let split = (smmu_status.strtab_base_cfg & SMMU_STRTAB_BASE_CFG_SPLIT)
+        >> SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET;
+    let log2size = (smmu_status.strtab_base_cfg & SMMU_STRTAB_BASE_CFG_LOG2SIZE)
+        >> SMMU_STRTAB_BASE_CFG_LOG2SIZE_BITS_OFFSET;
+    let level1_table_address = (smmu_status.strtab_base & SMMU_STRTAB_BASE_ADDRESS) as usize;
+    pr_debug!(
+        "SMMU: {{BASE: {:#X}, CFG: {:#X}(FMT: {:#b}, SPLIT: {}, SIZE:2^{})}}",
+        level1_table_address,
+        smmu_status.strtab_base_cfg,
+        fmt,
+        split,
+        log2size
+    );
+
+    if fmt != 0b01 {
+        panic!("Only the 2-level stream table is supported");
+    }
+
+    let split = if split != 6 && split != 8 && split != 10 {
+        println!("SMMU Split is invalid, treating it as 6");
+        unsafe {
+            CURRENT_SMMU_STATUS.strtab_base_cfg = (CURRENT_SMMU_STATUS.strtab_base_cfg
+                & !SMMU_STRTAB_BASE_CFG_SPLIT)
+                | (6 << SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET)
+        };
+        6
+    } else {
+        split
+    };
+
+    pr_debug!("L1STD[{}:{}] -> L2STD[{}:0]", log2size - 1, split, split);
+    if split >= log2size {
+        panic!("Unsupported Split and Log2Size");
+    }
+
+    let level1_table_size = get_level1_table_size(log2size, split);
+    let aligned_level1_table_size = stage2_page_align_up(level1_table_size);
+
+    assert_eq!(level1_table_address & !STAGE_2_PAGE_MASK, 0);
+    map_address(
+        level1_table_address,
+        level1_table_address,
+        page_align_up(level1_table_size),
+        true,
+        true,
+        false,
+        false,
+    )
+    .expect("Failed to map address");
+    add_memory_store_hook_handler(StoreAccessHandlerEntry::new(
+        level1_table_address,
+        level1_table_size,
+        level1_table_store_handler,
+    ))
+    .expect("Failed to add store handler");
+    add_memory_access_trap(level1_table_address, aligned_level1_table_size, true, false)
+        .expect("Failed to map SMMU table");
+
+    for i in 0..(level1_table_size / size_of::<u64>()) {
+        process_level1_table_entry(
+            unsafe { *((level1_table_address + (i * size_of::<u64>())) as *const u64) },
+            (i << split) as u32,
+            split,
+        );
+    }
+}
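
The stream-table walks here use `common::bitmask!`, which is defined outside this patch. A plausible minimal definition producing an untyped mask over the inclusive bit range [high:low] (an assumption, not the actual `common` source):

macro_rules! bitmask {
    ($high:expr, $low:expr) => {
        // Unsuffixed literals keep the mask's integer type inferred from use,
        // matching how the callers mix u64 and usize operands.
        (((1 << ($high - $low + 1)) - 1) << $low)
    };
}
// e.g. bitmask!(4, 0) == 0b1_1111 (the Span field of an L1 descriptor), and
// bitmask!(51, 5 + span) selects the L2 table pointer bits.
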
+
+fn process_level1_table_entry(entry: u64, base_id: u32, split: u32) {
+    let span = entry & bitmask!(4, 0);
+    if span == 0 || span > 12 {
+        pr_debug!(
+            "Level2 Table: {:#X}(Span: {}, Id: {:#X} ~ )",
+            entry,
+            span,
+            base_id
+        );
+        return;
+    }
+
+    let span_mask = bitmask!(51, 5 + (span as usize/* -1 + 1*/));
+    let table_address = (entry & span_mask) as usize;
+    let table_size = get_level2_table_size(span, split);
+
+    pr_debug!(
+        "Level2 Table: {:#X}(Span: {}, Address: {:#X}, TableSize: {:#X}, Id: {:#X} ~ )",
+        entry,
+        span,
+        table_address,
+        table_size,
+        base_id
+    );
+
+    map_address(
+        table_address,
+        table_address,
+        page_align_up(table_size),
+        true,
+        true,
+        false,
+        false,
+    )
+    .expect("Failed to map address");
+    add_memory_load_hook_handler(LoadAccessHandlerEntry::new(
+        table_address,
+        table_size,
+        level2_table_load_handler,
+    ))
+    .expect("Failed to add load handler");
+    add_memory_store_hook_handler(StoreAccessHandlerEntry::new(
+        table_address,
+        table_size,
+        level2_table_store_handler,
+    ))
+    .expect("Failed to add store handler");
+    add_memory_access_trap(
+        table_address,
+        stage2_page_align_up(table_size),
+        false,
+        false,
+    )
+    .expect("Failed to map SMMU table");
+
+    for i in 0..(1u32 << (span - 1)) {
+        process_level2_table_entry(
+            table_address + ((i as usize) * size_of::<StreamTableEntry>()),
+            base_id + i,
+            true,
+        );
+    }
+}
+
+fn process_level2_table_entry(entry_base: usize, _id: u32, should_check_entry: bool) {
+    let ste = unsafe { &mut *(entry_base as *mut StreamTableEntry) };
+    if should_check_entry {
+        if !ste.is_validated() {
+            //pr_debug!("STE(id: {:#X}) is not validated", id);
+            return;
+        }
+        if !ste.is_traffic_can_pass() {
+            //pr_debug!("STE(id: {:#X}) is not configured yet, ignore", id);
+            return;
+        }
+    }
+    ste.set_stage2_settings(
+        get_vtcr_el2(),
+        get_vttbr_el2(),
+        ste.is_traffic_can_pass(),
+        ste.is_stage1_bypassed(),
+    );
+}
+
+fn level1_table_store_handler(
+    accessing_address: usize,
+    _stored_registers: &mut StoredRegisters,
+    access_size: u8,
+    data: u64,
 ) -> Result<StoreHookResult, ()> {
-    println!("SMMU Store Access Handler");
-    return Ok(StoreHookResult::Cancel);
+    assert_eq!(STAGE_2_PAGE_SIZE, 0x1000);
+    if access_size != 0b11 {
+        panic!("Unsupported access size: {:#b}", access_size);
+    }
+    let id = (accessing_address
+        - (unsafe { CURRENT_SMMU_STATUS.strtab_base } & SMMU_STRTAB_BASE_ADDRESS) as usize)
+        >> 3;
+    pr_debug!("Level1 table ID: {}", id);
+    let smmu_split = (unsafe { CURRENT_SMMU_STATUS.strtab_base_cfg } & SMMU_STRTAB_BASE_CFG_SPLIT)
+        >> SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET;
+
+    remove_trap_of_level1_entry(unsafe { *(accessing_address as *mut u64) }, smmu_split);
+    process_level1_table_entry(data, (id << smmu_split) as u32, smmu_split);
+
+    Ok(StoreHookResult::PassThrough)
 }
 
-fn iort_load_handler(
-    _accessing_memory_address: usize,
+fn level2_table_load_handler(
+    accessing_address: usize,
     _stored_registers: &mut StoredRegisters,
-    _access_size: u8,
+    access_size: u8,
     _is_64bit_register: bool,
     _is_sign_extend_required: bool,
 ) -> Result<LoadHookResult, ()> {
-    return Ok(LoadHookResult::Data(0));
+    let ste_base = accessing_address & !(size_of::<StreamTableEntry>() - 1);
+    let ste_offset = accessing_address - ste_base;
+    let read_mask = !create_bitmask_of_stage2_configurations(ste_offset);
+    let original_data = emulation::read_memory(accessing_address, access_size);
+    Ok(LoadHookResult::Data((original_data & read_mask) as u64))
+}
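
`StreamTableEntry`, `SteArrayBaseType`, `STE_V`, and `create_bitmask_of_stage2_configurations` are provided by `common::smmu` and are not in this hunk. A sketch of the shape the two level-2 handlers rely on, assuming the 64-byte SMMUv3 STE is viewed as an array of words (the word width here is an assumption):

type SteArrayBaseType = u64; // assumed word type of the STE array

#[repr(C, align(64))]
struct StreamTableEntry([SteArrayBaseType; 8]); // one 64-byte SMMUv3 STE

const STE_V_INDEX: usize = 0;           // word holding the V bit (asserted below)
const STE_CONFIG_INDEX: usize = 0;      // word holding the Config field (asserted below)
const STE_V: SteArrayBaseType = 1 << 0; // STE.V: entry is valid
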
+
+fn level2_table_store_handler(
+    accessing_address: usize,
+    _stored_registers: &mut StoredRegisters,
+    access_size: u8,
+    data: u64,
+) -> Result<StoreHookResult, ()> {
+    let ste_base_address = accessing_address & !(size_of::<StreamTableEntry>() - 1);
+    let ste_offset = accessing_address - ste_base_address;
+    let ste_offset_per_ste_base_type = ste_offset / size_of::<SteArrayBaseType>();
+
+    let stream_id = get_stream_id(
+        accessing_address,
+        (unsafe { CURRENT_SMMU_STATUS.strtab_base } & SMMU_STRTAB_BASE_ADDRESS) as usize,
+        get_level1_table_size(
+            (unsafe { CURRENT_SMMU_STATUS.strtab_base_cfg } & SMMU_STRTAB_BASE_CFG_LOG2SIZE)
+                >> SMMU_STRTAB_BASE_CFG_LOG2SIZE_BITS_OFFSET,
+            (unsafe { CURRENT_SMMU_STATUS.strtab_base_cfg } & SMMU_STRTAB_BASE_CFG_SPLIT)
+                >> SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET,
+        ),
+        (unsafe { CURRENT_SMMU_STATUS.strtab_base_cfg } & SMMU_STRTAB_BASE_CFG_SPLIT)
+            >> SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET,
+    );
+    assert_eq!(STE_V_INDEX, 0);
+    assert_eq!(STE_CONFIG_INDEX, 0);
+    if ste_offset_per_ste_base_type == 0 {
+        process_level2_table_entry(ste_base_address, stream_id, false);
+    }
+    let data = if unsafe { &*(ste_base_address as *const StreamTableEntry) }.is_validated()
+        || (ste_offset_per_ste_base_type == 0 && ((data as SteArrayBaseType & STE_V) != 0))
+    {
+        let mask = create_bitmask_of_stage2_configurations(ste_offset);
+        let original_data = emulation::read_memory(accessing_address, access_size);
+        (data & !mask) | (original_data & mask)
+    } else {
+        data
+    };
+    //dump_level2_table_entry(ste_base_address, stream_id);
+    Ok(StoreHookResult::AlternativeData(data))
+}
+
+fn get_stream_id(
+    accessing_address: usize,
+    level1_table_base_address: usize,
+    level1_table_size: usize,
+    split: u32,
+) -> u32 {
+    let mut upper_id = 0;
+
+    while level1_table_size > (upper_id << 3) {
+        let entry = unsafe { *((level1_table_base_address + (upper_id << 3)) as *const u64) };
+        let span = entry & bitmask!(4, 0);
+        if span > 0 && span < 12 {
+            let span_mask = bitmask!(51, 5 + (span as usize/* -1 + 1*/));
+            if (accessing_address & span_mask) == (entry as usize & span_mask) {
+                /* Found */
+                return ((upper_id << split)
+                    | ((accessing_address - (accessing_address & span_mask))
+                        / size_of::<StreamTableEntry>())) as u32;
+            }
+        }
+        upper_id += 1;
+    }
+    panic!("Stream ID not found");
+}
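
A worked example of the StreamID recovery in `get_stream_id` above, assuming `split = 8` (an L1 index in the upper bits, an 8-bit L2 index below it):

// upper_id = 3 means the access matched the L1 descriptor at byte offset
// 3 << 3 = 0x18. If the accessed STE sits 0x2A entries (0x2A * 64 bytes) into
// that descriptor's L2 table, the guest's StreamID is:
//   (3 << 8) | 0x2A = 0x32A
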
+
+pub fn restore_smmu_status() {
+    let default_smmu_status = unsafe { &DEFAULT_SMMU_STATUS };
+    /* Restore GBPA */
+    while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+        core::hint::spin_loop();
+    }
+    write_smmu_register(SMMU_GBPA, default_smmu_status.gbpa | SMMU_GBPA_UPDATE);
+    while (read_smmu_register::<u32>(SMMU_GBPA) & SMMU_GBPA_UPDATE) != 0 {
+        core::hint::spin_loop();
+    }
+
+    write_smmu_register(SMMU_CR0, 0u32);
+    while (read_smmu_register::<u32>(SMMU_CR0ACK) & SMMU_CR0_SMMUEN) != 0 {
+        core::hint::spin_loop();
+    }
+
+    /* Restore the SMMU settings */
+    write_smmu_register(SMMU_CR1, default_smmu_status.cr1);
+    write_smmu_register(SMMU_CR2, default_smmu_status.cr2);
+    write_smmu_register(SMMU_AGBPA, default_smmu_status.agbpa);
+    write_smmu_register(SMMU_IRQ_CTRL, default_smmu_status.irq_ctrl);
+    write_smmu_register(SMMU_GERRORN, default_smmu_status.gerrorn);
+    write_smmu_register(SMMU_STRTAB_BASE, default_smmu_status.strtab_base);
+    write_smmu_register(SMMU_STRTAB_BASE_CFG, default_smmu_status.strtab_base_cfg);
+    write_smmu_register(SMMU_GATOS_CTRL, default_smmu_status.gatos_ctrl);
+
+    write_smmu_register(SMMU_GBPA, default_smmu_status.gbpa | SMMU_GBPA_UPDATE);
+    write_smmu_register(SMMU_CR0, default_smmu_status.cr0);
+
+    if unsafe { CURRENT_SMMU_STATUS.cr0 & SMMU_CR0_SMMUEN } != 0 {
+        remove_current_stream_table_traps();
+    }
+    unsafe { CURRENT_SMMU_STATUS = SmmuSavedRegisters::new() };
+}
+
+#[allow(dead_code)]
+pub fn dump_stream_table() {
+    let table_base_address =
+        (read_smmu_register::<u64>(SMMU_STRTAB_BASE) & SMMU_STRTAB_BASE_ADDRESS) as usize;
+    let strtab_base_cfg = read_smmu_register::<u32>(SMMU_STRTAB_BASE_CFG);
+    let split =
+        (strtab_base_cfg & SMMU_STRTAB_BASE_CFG_SPLIT) >> SMMU_STRTAB_BASE_CFG_SPLIT_BITS_OFFSET;
+    let log2size = (strtab_base_cfg & SMMU_STRTAB_BASE_CFG_LOG2SIZE)
+        >> SMMU_STRTAB_BASE_CFG_LOG2SIZE_BITS_OFFSET;
+    let level1_table_size = get_level1_table_size(log2size, split);
+    for i in 0..(level1_table_size >> 3) {
+        dump_level1_table_entry(
+            unsafe { *((table_base_address + (i << 3)) as *const u64) },
+            (i << split) as u32,
+            split,
+        );
+    }
+}
+
+#[allow(dead_code)]
+fn dump_level1_table_entry(entry: u64, base_id: u32, split: u32) {
+    let span = entry & bitmask!(4, 0);
+    if span == 0 || span > 12 {
+        println!(
+            "Level1(ID: {:#X} ~ ): {:#X}(Invalid, Span:{})",
+            base_id, entry, span
+        );
+        return;
+    }
+
+    let span_mask = bitmask!(51, 5 + (span as usize/* -1 + 1*/));
+    let table_address = (entry & span_mask) as usize;
+    let table_size = get_level2_table_size(span, split);
+    println!(
+        "Level1(ID: {:#X} ~ ): {:#X}(Span: {}, L2Ptr: {:#X}, TableSize: {:#X})",
+        base_id, entry, span, table_address, table_size
+    );
+
+    for i in 0..(1u32 << (span - 1)) {
+        dump_level2_table_entry(
+            table_address + ((i as usize) * size_of::<StreamTableEntry>()),
+            base_id + i,
+        );
+    }
+}
+
+#[allow(dead_code)]
+fn dump_level2_table_entry(entry_base: usize, id: u32) {
+    println!("  STE(id: {:#X}):", id);
+    for i in 0..(size_of::<StreamTableEntry>() / size_of::<u32>()) {
+        println!("    {:#X}: {:#X}", i * size_of::<u32>(), unsafe {
+            *((entry_base + i * size_of::<u32>()) as *const u32)
+        });
+    }
 }
diff --git a/src/uefi/Cargo.toml b/src/uefi/Cargo.toml
index c7ae30f..111a357 100644
--- a/src/uefi/Cargo.toml
+++ b/src/uefi/Cargo.toml
@@ -6,7 +6,7 @@
 # http://opensource.org/licenses/mit-license.php
 [package]
 name = "uefi"
-version = "0.4.0"
+version = "1.0.0"
 edition = "2021"
 
 [dependencies]
diff --git a/src/uefi/src/boot_service.rs b/src/uefi/src/boot_service.rs
index 4c2f7f4..e3a6fe7 100644
--- a/src/uefi/src/boot_service.rs
+++ b/src/uefi/src/boot_service.rs
@@ -9,35 +9,34 @@
 //! UEFI Boot Services
 //!
-pub mod memory_service; +mod memory_service; -use memory_service::{EfiAllocateType, EfiMemoryDescriptor, EfiMemoryType}; +pub use memory_service::*; -use super::{EfiHandle, EfiStatus, EfiTableHeader, Guid}; +use crate::{EfiHandle, EfiStatus, EfiTableHeader, Guid}; -#[derive(Debug)] #[repr(C)] pub struct EfiBootServices { efi_table_header: EfiTableHeader, raise_tpl: usize, restore_tpl: usize, - pub allocate_pages: extern "C" fn( + allocate_pages: extern "efiapi" fn( allocate_type: EfiAllocateType, memory_type: EfiMemoryType, pages: usize, memory: *mut usize, ) -> EfiStatus, free_pages: usize, - pub get_memory_map: extern "C" fn( + get_memory_map: extern "efiapi" fn( memory_map_size: *mut usize, memory_map: *mut EfiMemoryDescriptor, map_key: *mut usize, descriptor_size: *mut usize, descriptor_version: *mut u32, ) -> EfiStatus, - pub allocate_pool: - extern "C" fn(pool_type: EfiMemoryType, size: usize, memory: *mut usize) -> EfiStatus, - pub free_pool: extern "C" fn(memory: usize) -> EfiStatus, + allocate_pool: + extern "efiapi" fn(pool_type: EfiMemoryType, size: usize, memory: *mut usize) -> EfiStatus, + free_pool: extern "efiapi" fn(memory: usize) -> EfiStatus, create_event: usize, set_timer: usize, wait_for_event: usize, @@ -55,20 +54,21 @@ pub struct EfiBootServices { install_configuration_table: usize, load_image: usize, start_image: usize, - pub exit: extern "C" fn( + pub exit: extern "efiapi" fn( image_handler: EfiHandle, exit_status: EfiStatus, exit_data_size: usize, exit_data: *const u16, ) -> EfiStatus, unload_image: usize, - pub exit_boot_services: extern "C" fn(image_handler: EfiHandle, map_key: usize) -> EfiStatus, + pub exit_boot_services: + extern "efiapi" fn(image_handler: EfiHandle, map_key: usize) -> EfiStatus, get_next_monotonic_count: usize, stall: usize, set_watchdog_timer: usize, connect_controller: usize, disconnect_controller: usize, - pub open_protocol: extern "C" fn( + pub open_protocol: extern "efiapi" fn( handle: EfiHandle, protocol: *const Guid, interface: *mut *const usize, @@ -80,7 +80,7 @@ pub struct EfiBootServices { open_protocol_information: usize, protocols_per_handle: usize, locate_handle_buffer: usize, - pub locate_protocol: extern "C" fn( + pub locate_protocol: extern "efiapi" fn( protocol: *const Guid, registration: *const usize, interface: *mut *const usize, @@ -88,7 +88,19 @@ pub struct EfiBootServices { install_multiple_protocol_interfaces: usize, uninstall_multiple_protocol_interfaces: usize, calculate_crc32: usize, - pub copy_mem: extern "C" fn(destination: usize, source: usize, length: usize), - pub set_mem: extern "C" fn(buffer: usize, size: usize, value: u8), + copy_mem: extern "efiapi" fn(destination: usize, source: usize, length: usize), + set_mem: extern "efiapi" fn(buffer: usize, size: usize, value: u8), create_event_ex: usize, } + +pub const EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL: u32 = 0x00000001; +#[allow(dead_code)] +pub const EFI_OPEN_PROTOCOL_GET_PROTOCOL: u32 = 0x00000002; +#[allow(dead_code)] +pub const EFI_OPEN_PROTOCOL_TEST_PROTOCOL: u32 = 0x00000004; +#[allow(dead_code)] +pub const EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER: u32 = 0x00000008; +#[allow(dead_code)] +pub const EFI_OPEN_PROTOCOL_BY_DRIVER: u32 = 0x00000010; +#[allow(dead_code)] +pub const EFI_OPEN_PROTOCOL_EXCLUSIVE: u32 = 0x00000020; diff --git a/src/uefi/src/boot_service/memory_service.rs b/src/uefi/src/boot_service/memory_service.rs index ed26006..1e0ffc8 100644 --- a/src/uefi/src/boot_service/memory_service.rs +++ b/src/uefi/src/boot_service/memory_service.rs @@ -1,4 
+1,5 @@
 // Copyright (c) 2022 RIKEN
+// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST)
 // All rights reserved.
 //
 // This software is released under the MIT License.
@@ -8,12 +9,12 @@
 //! Memory Allocation Services of Boot Service
 //!
 
-use super::super::EfiStatus;
 use super::EfiBootServices;
+use crate::EfiStatus;
+
 #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
 #[repr(C)]
-#[allow(dead_code)]
 pub enum EfiMemoryType {
     EfiReservedMemoryType,
     EfiLoaderCode,
@@ -35,7 +36,6 @@ pub enum EfiMemoryType {
 
 #[derive(Copy, Clone, Eq, PartialEq)]
 #[repr(u64)]
-#[allow(dead_code)]
 pub enum EfiMemoryAttribute {
     EfiMemoryUc = 0x0000000000000001,
     EfiMemoryWc = 0x0000000000000002,
@@ -55,7 +55,6 @@ pub enum EfiMemoryAttribute {
 
 #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
 #[repr(C)]
-#[allow(dead_code)]
 pub enum EfiAllocateType {
     AllocateAnyPages,
     AllocateMaxAddress,
@@ -100,9 +99,15 @@ pub fn free_pool(b_s: *const EfiBootServices, address: usize) -> Result<(), EfiS
     Ok(())
 }
 
-/// サイズ(ページ指定)を満たす中で最高位のアドレスを確保する。
+/// Allocate the highest memory region that satisfies the requested size and `border_address`
+///
+/// # Arguments
+/// * `b_s` - EfiBootServices
+/// * `pages` - the number of needed pages
+/// * `border_address` - the upper bound for the allocated address
 ///
-/// [`border_address`]以下で条件に合うアドレスを確保する。
+/// # Result
+/// If the allocation succeeds, returns Ok(start_address); otherwise Err(EfiStatus)
 pub fn alloc_highest_memory(
     b_s: *const EfiBootServices,
     pages: usize,
@@ -123,10 +128,18 @@ pub fn alloc_highest_memory(
     Ok(memory_address)
 }
 
-/// メモリマップを取得する
+/// Get the memory map
+///
+/// This function allocates a memory pool via [`alloc_pool`] to store the memory map.
+///
+/// # Arguments
+/// * `b_s` - EfiBootServices
+///
+/// # Result
+/// If the call succeeds, returns Ok(MemoryMapInfo); otherwise Err(EfiStatus)
 ///
-/// メモリマップを取得する際に、[`alloc_pool`]を使用し、その中にメモリマップを格納する。
-/// [`MemoryMapInfo::descriptor_address`]は[`free_pool`]で開放する。
+/// # Attention
+/// After processing the memory map, you must free [`MemoryMapInfo::descriptor_address`] with [`free_pool`]
 pub fn get_memory_map(b_s: *const EfiBootServices) -> Result<MemoryMapInfo, EfiStatus> {
     let mut memory_map_size = 0;
     let mut map_key = 0usize;
diff --git a/src/uefi/src/file.rs b/src/uefi/src/file.rs
index 70dd216..297196a 100644
--- a/src/uefi/src/file.rs
+++ b/src/uefi/src/file.rs
@@ -1,4 +1,5 @@
 // Copyright (c) 2022 RIKEN
+// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST)
 // All rights reserved.
 //
 // This software is released under the MIT License.
@@ -8,11 +9,11 @@
 //! EFI Simple File System Protocol
 //!
-use super::boot_service::EfiBootServices; -use super::loaded_image::EfiLoadedImageProtocol; -use super::{ +use crate::boot_service::{EfiBootServices, EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL}; +use crate::loaded_image::EfiLoadedImageProtocol; +use crate::{ EfiHandle, EfiStatus, EfiTime, Guid, EFI_LOADED_IMAGE_PROTOCOL_GUID, - EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL, EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID, + EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID, }; const EFI_FILE_MODE_READ: u64 = 0x0000000000000001; @@ -46,7 +47,8 @@ const EFI_FILE_INFO_ID: Guid = Guid { #[repr(C)] pub struct EfiSimpleFileProtocol { revision: u64, - open_volume: extern "C" fn(this: *const Self, root: *mut *const EfiFileProtocol) -> EfiStatus, + open_volume: + extern "efiapi" fn(this: *const Self, root: *mut *const EfiFileProtocol) -> EfiStatus, } #[derive(Debug)] @@ -65,34 +67,41 @@ pub struct EfiFileInfo { #[repr(C)] pub struct EfiFileProtocol { revision: u64, - open: extern "C" fn( + open: extern "efiapi" fn( this: *const Self, new_handle: *mut *const Self, file_name: *const u16, open_mode: u64, attributes: u64, ) -> EfiStatus, - close: extern "C" fn(this: *const Self) -> EfiStatus, - delete: extern "C" fn(this: *const Self) -> EfiStatus, - read: extern "C" fn(this: *const Self, buffer_size: *mut usize, buffer: *mut u8) -> EfiStatus, - write: - extern "C" fn(this: *const Self, buffer_size: *mut usize, buffer: *const u8) -> EfiStatus, - get_position: extern "C" fn(this: *const Self, position: *mut u64) -> EfiStatus, - set_position: extern "C" fn(this: *const Self, position: u64) -> EfiStatus, - get_info: extern "C" fn( + close: extern "efiapi" fn(this: *const Self) -> EfiStatus, + delete: extern "efiapi" fn(this: *const Self) -> EfiStatus, + read: extern "efiapi" fn( + this: *const Self, + buffer_size: *mut usize, + buffer: *mut u8, + ) -> EfiStatus, + write: extern "efiapi" fn( + this: *const Self, + buffer_size: *mut usize, + buffer: *const u8, + ) -> EfiStatus, + get_position: extern "efiapi" fn(this: *const Self, position: *mut u64) -> EfiStatus, + set_position: extern "efiapi" fn(this: *const Self, position: u64) -> EfiStatus, + get_info: extern "efiapi" fn( this: *const Self, information_type: *const Guid, buffer_size: *mut usize, buffer: *mut u8, ) -> EfiStatus, - set_info: extern "C" fn( + set_info: extern "efiapi" fn( this: *const Self, information_type: *const Guid, buffer_size: usize, buffer: *const u8, ) -> EfiStatus, - flush: extern "C" fn(this: *const Self) -> EfiStatus, - open_ex: extern "C" fn( + flush: extern "efiapi" fn(this: *const Self) -> EfiStatus, + open_ex: extern "efiapi" fn( this: *const Self, new_handle: *mut *const Self, file_name: *const u16, @@ -100,9 +109,9 @@ pub struct EfiFileProtocol { attributes: u64, token: usize, ) -> EfiStatus, - read_ex: extern "C" fn(this: *const Self, token: usize) -> EfiStatus, - write_ex: extern "C" fn(this: *const Self, token: usize) -> EfiStatus, - flush_ex: extern "C" fn(this: *const Self, token: usize) -> EfiStatus, + read_ex: extern "efiapi" fn(this: *const Self, token: usize) -> EfiStatus, + write_ex: extern "efiapi" fn(this: *const Self, token: usize) -> EfiStatus, + flush_ex: extern "efiapi" fn(this: *const Self, token: usize) -> EfiStatus, } pub fn open_root_dir( diff --git a/src/uefi/src/lib.rs b/src/uefi/src/lib.rs index 66273bc..1927a40 100644 --- a/src/uefi/src/lib.rs +++ b/src/uefi/src/lib.rs @@ -1,18 +1,18 @@ // Copyright (c) 2022 RIKEN +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) // All rights reserved. 
// // This software is released under the MIT License. // http://opensource.org/licenses/mit-license.php //! -//! UEFI +//! Unified Extensible Firmware Interface +//! +//! Supported Version: 2.9 //! #![no_std] -#![feature(maybe_uninit_array_assume_init)] - -use boot_service::EfiBootServices; -use output::EfiOutputProtocol; +#![feature(abi_efiapi)] pub mod boot_service; pub mod file; @@ -98,11 +98,11 @@ pub struct EfiSystemTable { pub console_input_handler: EfiHandle, pub console_input_protocol: usize, pub console_output_handler: EfiHandle, - pub console_output_protocol: *const EfiOutputProtocol, + pub console_output_protocol: *const output::EfiOutputProtocol, pub standard_error_handler: EfiHandle, - pub standard_error_protocol: *const EfiOutputProtocol, + pub standard_error_protocol: *const output::EfiOutputProtocol, pub efi_runtime_services: usize, - pub efi_boot_services: *const EfiBootServices, + pub efi_boot_services: *const boot_service::EfiBootServices, pub num_table_entries: usize, pub configuration_table: usize, } @@ -165,15 +165,3 @@ pub struct EfiTime { day_light: u8, pad_2: u8, } - -const EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL: u32 = 0x00000001; -#[allow(dead_code)] -const EFI_OPEN_PROTOCOL_GET_PROTOCOL: u32 = 0x00000002; -#[allow(dead_code)] -const EFI_OPEN_PROTOCOL_TEST_PROTOCOL: u32 = 0x00000004; -#[allow(dead_code)] -const EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER: u32 = 0x00000008; -#[allow(dead_code)] -const EFI_OPEN_PROTOCOL_BY_DRIVER: u32 = 0x00000010; -#[allow(dead_code)] -const EFI_OPEN_PROTOCOL_EXCLUSIVE: u32 = 0x00000020; diff --git a/src/uefi/src/loaded_image.rs b/src/uefi/src/loaded_image.rs index 9a7330a..a3db213 100644 --- a/src/uefi/src/loaded_image.rs +++ b/src/uefi/src/loaded_image.rs @@ -1,15 +1,15 @@ // Copyright (c) 2022 RIKEN +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) // All rights reserved. // // This software is released under the MIT License. // http://opensource.org/licenses/mit-license.php //! -//! Loaded Image +//! EFI Loaded Image Protocol //! -use super::boot_service::memory_service::EfiMemoryType; -use super::{EfiHandle, EfiStatus, EfiSystemTable}; +use crate::{boot_service::EfiMemoryType, EfiHandle, EfiStatus, EfiSystemTable}; #[repr(C)] pub struct EfiLoadedImageProtocol { @@ -24,5 +24,5 @@ pub struct EfiLoadedImageProtocol { pub image_base: usize, pub image_code_type: EfiMemoryType, pub image_data_type: EfiMemoryType, - pub unload: extern "C" fn(image_handle: EfiHandle) -> EfiStatus, + pub unload: extern "efiapi" fn(image_handle: EfiHandle) -> EfiStatus, } diff --git a/src/uefi/src/output.rs b/src/uefi/src/output.rs index bfc65aa..9a2b92d 100644 --- a/src/uefi/src/output.rs +++ b/src/uefi/src/output.rs @@ -1,19 +1,20 @@ // Copyright (c) 2022 RIKEN +// Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) // All rights reserved. // // This software is released under the MIT License. // http://opensource.org/licenses/mit-license.php //! -//! EFI Output Protocol +//! EFI Simple Text Output Protocol //! 
-use super::EfiStatus;
+use crate::EfiStatus;
 
 #[repr(C)]
 pub struct EfiOutputProtocol {
-    reset: extern "C" fn(*const EfiOutputProtocol, bool) -> EfiStatus,
-    output: extern "C" fn(*const EfiOutputProtocol, *const u16) -> EfiStatus,
+    reset: extern "efiapi" fn(*const EfiOutputProtocol, bool) -> EfiStatus,
+    output_string: extern "efiapi" fn(*const EfiOutputProtocol, *const u16) -> EfiStatus,
     test_string: usize,
     query_mode: usize,
     set_mode: usize,
@@ -25,15 +26,19 @@ pub struct EfiOutputProtocol {
 }
 
 impl EfiOutputProtocol {
-    /// 画面を消去してカーソルを一番先頭に持っていく
+    /// Clear the screen and move the cursor to the top
+    ///
+    /// # Arguments
+    /// * `extended_verification` - whether extended verification should be executed (this is passed through to the firmware)
     #[allow(dead_code)]
     pub fn reset(&self, extended_verification: bool) -> EfiStatus {
         (self.reset)(self as *const _, extended_verification)
     }
 
-    /// 文字列を画面に表示する
+    /// Print the string
     ///
-    /// ATTENTION: UTF-16であるため、日本語も扱えるがフォントを持っていないUEFIも多く正しく表示できない可能性あり
+    /// # Arguments
+    /// * `string` - the string to print (should avoid non-ASCII characters)
     pub fn output(&self, string: &str) -> EfiStatus {
         let mut buf = [0; 256];
         let mut pointer = 0;
@@ -41,19 +46,19 @@ impl EfiOutputProtocol {
         for x in string.encode_utf16() {
             if x == '\n' as u16 {
                 buf[pointer] = 0;
-                let status = (self.output)(self as *const _, buf.as_ptr());
+                let status = (self.output_string)(self as *const _, buf.as_ptr());
                 if status != EfiStatus::EfiSuccess {
                     return status;
                 }
                 pointer = 0;
                 let cr_lf = ['\r' as u16, '\n' as u16, '\0' as u16];
-                let status = (self.output)(self as *const _, cr_lf.as_ptr());
+                let status = (self.output_string)(self as *const _, cr_lf.as_ptr());
                 if status != EfiStatus::EfiSuccess {
                     return status;
                 }
             } else {
                 if pointer >= buf.len() - 1 {
-                    let status = (self.output)(self as *const _, buf.as_ptr());
+                    let status = (self.output_string)(self as *const _, buf.as_ptr());
                     if status != EfiStatus::EfiSuccess {
                         return status;
                     }
@@ -64,6 +69,6 @@ impl EfiOutputProtocol {
             }
         }
         buf[pointer] = 0;
-        (self.output)(self as *const _, buf.as_ptr())
+        (self.output_string)(self as *const _, buf.as_ptr())
     }
 }
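
The switch from extern "C" to extern "efiapi" throughout this patch matters because UEFI mandates the Microsoft x64 calling convention on x86_64, while matching the C convention on AArch64; "efiapi" selects the correct ABI per target, so the same declarations stay portable. A usage sketch for the protocol above, assuming an efi_main that received a valid EfiSystemTable pointer (error handling omitted; illustrative only):

fn print_banner(system_table: &EfiSystemTable) {
    // console_output_protocol is set up by the firmware before efi_main runs.
    let con_out = unsafe { &*system_table.console_output_protocol };
    con_out.reset(false);            // clear the screen first
    con_out.output("MilvusVisor\n"); // output() converts to UTF-16 and emits CR before LF
}
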