Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Elfloader: NVIDIA Jetson Orin support #190

Open
wants to merge 19 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 3 commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion cmake-tool/helpers/application_settings.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ function(ApplyData61ElfLoaderSettings kernel_platform kernel_sel4_arch)
binary_list
"tx1;hikey;odroidc2;odroidc4;imx8mq-evk;imx8mm-evk;hifive;tqma8xqp1gb;bcm2711;rocketchip"
)
set(efi_list "tk1;rockpro64;quartz64")
set(efi_list "tk1;rockpro64;quartz64;orin")
set(uimage_list "tx2;am335x")
if(
${kernel_platform} IN_LIST efi_list
Expand Down
20 changes: 11 additions & 9 deletions elfloader-tool/include/arch-arm/64/mode/assembler.h
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,15 @@
#define MT_NORMAL_WT 5
#define MAIR(_attr, _mt) ((_attr) << ((_mt) * 8))

/*
 * disable_mmu: turn off the MMU via the given SCTLR system register
 * (tmp is a scratch register, clobbered), then discard any stale
 * instructions: IC IALLUIS invalidates all I-caches to the Inner
 * Shareable domain, DSB SY waits for the invalidation to complete,
 * and ISB resynchronises this PE's instruction stream.
 * NOTE(review): the actual SCTLR bit clearing is delegated to the
 * __disable_mmu macro defined elsewhere in this file -- confirm it
 * clears the M/C/I bits as the old inline version did.
 */
.macro disable_mmu sctlr tmp
__disable_mmu \sctlr, \tmp
ic ialluis
dsb sy
isb
.endm

.macro enable_mmu sctlr tmp
dsb sy
mrs \tmp, \sctlr
orr \tmp, \tmp, #(1 << 0)
orr \tmp, \tmp, #(1 << 2)
Expand All @@ -76,17 +84,11 @@
isb
.endm

.macro disable_mmu sctlr tmp
mrs \tmp, \sctlr
bic \tmp, \tmp, #(1 << 0)
bic \tmp, \tmp, #(1 << 2)
bic \tmp, \tmp, #(1 << 12)
msr \sctlr, \tmp
.macro __disable_mmu sctlr tmp
dsb sy
isb
.endm

.macro disable_id_cache sctlr tmp
mrs \tmp, \sctlr
bic \tmp, \tmp, #(1 << 0)
bic \tmp, \tmp, #(1 << 2)
bic \tmp, \tmp, #(1 << 12)
msr \sctlr, \tmp
Expand Down
68 changes: 35 additions & 33 deletions elfloader-tool/src/arch-arm/armv/armv8-a/64/mmu-hyp.S
Original file line number Diff line number Diff line change
Expand Up @@ -21,31 +21,42 @@
.extern invalidate_icache
.extern _boot_pgd_down

BEGIN_FUNC(disable_caches_hyp)
stp x29, x30, [sp, #-16]!
mov x29, sp
bl flush_dcache
disable_id_cache sctlr_el2, x9
ldp x29, x30, [sp], #16
/*
 * disable_mmu_caches_hyp: disable the EL2 (hypervisor) MMU and
 * cacheability via the disable_mmu macro. Clobbers x9.
 *
 * Unlike the old disable_caches_hyp, this does NOT clean the D-cache
 * first: callers must already have cleaned any data the CPU will need
 * (to the Point of Coherency) before calling.
 */
BEGIN_FUNC(disable_mmu_caches_hyp)
/* Assume D-cache already cleaned to PoC */
disable_mmu sctlr_el2, x9
ret
END_FUNC(disable_mmu_caches_hyp)

/*
 * clean_dcache_by_range(x0 = start, x1 = end):
 * Clean (write back) every data-cache line covering [start, end) to the
 * Point of Coherency, so the data is visible to observers running with
 * caches disabled. Clobbers x2, x3, x4.
 */
BEGIN_FUNC(clean_dcache_by_range)
/* Ordering needed for strongly-ordered mem, not needed for NORMAL mem.
 * See ARM DDI 0487I.a, page D7-5063.
 */
dmb sy

/* Extract minimum DCache CL size into x3 and CL mask into x4 */
mrs x2, ctr_el0
ubfx x4, x2, #16, #4
mov x3, #4
lsl x3, x3, x4
sub x4, x3, #1

/* Apply mask to start address before entering the loop */
bic x4, x0, x4
clean_dcache_by_range_loop:
dc cvac, x4
add x4, x4, x3
cmp x4, x1
/* Addresses are unsigned: use B.LO (unsigned lower) rather than B.LT
 * (signed less-than) so the loop also terminates correctly for ranges
 * with bit 63 set. */
b.lo clean_dcache_by_range_loop
dsb sy
isb
ret
END_FUNC(clean_dcache_by_range)

BEGIN_FUNC(leave_hyp)
/* We call nested functions, follow the ABI. */
stp x29, x30, [sp, #-16]!
mov x29, sp

bl flush_dcache

/* Ensure I-cache, D-cache and mmu are disabled for EL2/Stage1 */
disable_mmu sctlr_el2, x9

/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively are discarded.
*/
bl invalidate_icache

/* Ensure I-cache, D-cache and mmu are disabled for EL1/Stage2 */
mov x9, #(1 << 31)
msr hcr_el2, x9
Expand All @@ -71,15 +82,6 @@ BEGIN_FUNC(leave_hyp)
END_FUNC(leave_hyp)

BEGIN_FUNC(arm_enable_hyp_mmu)
stp x29, x30, [sp, #-16]!
mov x29, sp

bl flush_dcache

disable_mmu sctlr_el2, x8

bl invalidate_icache

/*
* DEVICE_nGnRnE 000 00000000
* DEVICE_nGnRE 001 00000100
Expand Down Expand Up @@ -107,13 +109,13 @@ BEGIN_FUNC(arm_enable_hyp_mmu)
dsb ish
isb

enable_mmu sctlr_el2, x8
/* Invalidate icache */
ic ialluis
dsb ish
dsb sy
isb
tlbi alle2is
dsb ish
isb
ldp x29, x30, [sp], #16

enable_mmu sctlr_el2, x8
/* NOTE: enable_mmu already contains an isb after enabling. */

ret
END_FUNC(arm_enable_hyp_mmu)
11 changes: 0 additions & 11 deletions elfloader-tool/src/arch-arm/armv/armv8-a/64/mmu.S
Original file line number Diff line number Diff line change
Expand Up @@ -43,17 +43,6 @@ BEGIN_FUNC(arm_enable_mmu)
stp x29, x30, [sp, #-16]!
mov x29, sp

bl flush_dcache

/* Ensure I-cache, D-cache and mmu are disabled for EL1/Stage1 */
disable_mmu sctlr_el1 , x8

/*
* Invalidate the local I-cache so that any instructions fetched
* speculatively are discarded.
*/
bl invalidate_icache

/*
* DEVICE_nGnRnE 000 00000000
* DEVICE_nGnRE 001 00000100
Expand Down
6 changes: 6 additions & 0 deletions elfloader-tool/src/arch-arm/armv/armv8-a/64/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,12 @@ void core_entry(uint64_t sp)

/*
 * is_core_up: returns non-zero once secondary core i has published its
 * logical id into core_up[i] (presumably written by core_entry on that
 * core -- verify against caller).
 */
int is_core_up(int i)
{
/* Secondary core may be booted with caches disabled,
 * this value might be written in memory, invalidate our
 * copy and get a new one. */
/* NOTE(review): DC IVAC discards our possibly-stale cached copy so the
 * load below observes the write the secondary core made with its caches
 * off. Confirm that DMB NSH is sufficient ordering here -- completion of
 * cache maintenance is normally guaranteed by DSB, not DMB (check
 * against ARM DDI 0487). */
asm volatile("dc ivac, %0\n\t"
"dmb nsh\n\t"
:: "r"(&core_up[i]));
return core_up[i] == i;
}

Expand Down
9 changes: 8 additions & 1 deletion elfloader-tool/src/arch-arm/drivers/smp-psci.c
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
*
* SPDX-License-Identifier: GPL-2.0-only
*/
#include <autoconf.h>
#include <elfloader_common.h>
#include <devices_gen.h>
#include <drivers/common.h>
Expand All @@ -24,7 +25,13 @@ static int smp_psci_cpu_on(UNUSED struct elfloader_device *dev,
}
secondary_data.entry = entry;
secondary_data.stack = stack;
dmb();
#if defined(CONFIG_ARCH_AARCH64)
/* If the secondary core caches are off, need to make sure that the info
* is clean to physical memory so that the secondary cores can read it.
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Typo: "sedcondary".

*/
asm volatile("dc cvac, %0" :: "r"(&secondary_data));
dsb();
#endif
int ret = psci_cpu_on(cpu->cpu_id, (unsigned long)&secondary_startup, 0);
if (ret != PSCI_SUCCESS) {
printf("Failed to bring up core 0x%x with error %d\n", cpu->cpu_id, ret);
Expand Down
32 changes: 28 additions & 4 deletions elfloader-tool/src/arch-arm/smp_boot.c
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,11 @@ void non_boot_main(void)
#endif
/* Spin until the first CPU has finished initialisation. */
while (!non_boot_lock) {
#ifndef CONFIG_ARCH_AARCH64
#ifdef CONFIG_ARCH_AARCH64
/* The compiler may optimize this loop away, add a dsb()
* to force a reload. */
dsb();
Comment on lines +38 to +40
Copy link

@Indanz Indanz Feb 26, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This also applies to non-aarch64, so this doesn't look like the right solution. Using standard atomics may be better and more portable here.

Edit: The problem may be masked by cpu_idle() in 32-bit, but this still feels wrong.

#else
cpu_idle();
#endif
}
Expand All @@ -45,12 +49,19 @@ void non_boot_main(void)
abort();
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
if (is_hyp_mode()) {
extern void leave_hyp(void);
extern void disable_mmu_caches_hyp(void);
#ifdef CONFIG_ARCH_AARCH64
/* Disable the MMU and cacheability unconditionally on ARM64.
* The 32 bit ARM platforms do not expect the MMU to be turned
* off, so we leave them alone. */
disable_mmu_caches_hyp();
#endif
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
leave_hyp();
}
#endif
}
/* Enable the MMU, and enter the kernel. */
if (is_hyp_mode()) {
arm_enable_hyp_mmu();
Expand Down Expand Up @@ -117,7 +128,13 @@ WEAK void init_cpus(void)
abort();
}

while (!is_core_up(num_cpus));
while (!is_core_up(num_cpus)) {
#if defined(CONFIG_ARCH_AARCH64)
/* The compiler may optimize this loop away, add a dsb()
* to force a reload. */
dsb();
#endif
}
printf("Core %d is up with logic id %d\n", elfloader_cpus[i].cpu_id, num_cpus);
num_cpus++;
}
Expand All @@ -134,6 +151,13 @@ void smp_boot(void)
arm_disable_dcaches();
#endif
init_cpus();
#if defined(CONFIG_ARCH_AARCH64)
dsb();
non_boot_lock = 1;
/* Secondary CPUs may still run with MMU & caches off. Force the update to be visible. */
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The same is true for 32-bit, right?

asm volatile("dc civac, %0\n\t" :: "r"(&non_boot_lock) : "memory");;
#else
non_boot_lock = 1;
#endif
}
#endif /* CONFIG_MAX_NUM_NODES */
87 changes: 87 additions & 0 deletions elfloader-tool/src/arch-arm/sys_boot.c
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,92 @@ void main(UNUSED void *arg)
abort();
}

/* ARMv8 64-bit specific implementation of continue_boot() */
#if defined(CONFIG_ARCH_AARCH64)
/*
 * AArch64 implementation of continue_boot(): entered once the ELF loader
 * (and optionally the DTB) is at its final location, possibly after
 * self-relocation. Cleans caches and drops out of EL2 if required, sets
 * up the boot page tables, boots secondary cores, enables the MMU and
 * jumps to the kernel. Does not return.
 *
 * @param was_relocated  non-zero if the ELF loader moved itself; the
 *                       driver model is then re-initialised so all of
 *                       its pointers are valid at the new location.
 *
 * BUG FIX: the previous version never closed the `if (is_hyp_mode())`
 * block opened before the cache cleaning, leaving the braces unbalanced
 * (and the MMU-setup/kernel-entry code nested under the hyp-only path).
 * The block now closes after leave_hyp(), so non-hyp boots also reach
 * the MMU setup and kernel entry below.
 */
void continue_boot(int was_relocated)
{
    if (was_relocated) {
        printf("ELF loader relocated, continuing boot...\n");
    }

    /*
     * If we were relocated, we need to re-initialise the
     * driver model so all its pointers are set up properly.
     */
    if (was_relocated) {
        initialise_devices();
    }

    /* If in EL2, disable MMU and I/D cacheability unconditionally */
    if (is_hyp_mode()) {
        extern void disable_mmu_caches_hyp(void);
        extern void clean_dcache_by_range(paddr_t start, paddr_t end);

        /* Clean to PoC everything that will be read with caches off:
         * kernel image, user image, the loader itself and the DTB. */
        paddr_t start = kernel_info.phys_region_start;
        paddr_t end = kernel_info.phys_region_end;
        clean_dcache_by_range(start, end);
        start = (paddr_t)user_info.phys_region_start;
        end = (paddr_t)user_info.phys_region_end;
        clean_dcache_by_range(start, end);
        start = (paddr_t)_text;
        end = (paddr_t)_end;
        clean_dcache_by_range(start, end);
        if (dtb) {
            start = (paddr_t)dtb;
            end = start + dtb_size;
            clean_dcache_by_range(start, end);
        }

        /* Disable the MMU and cacheability unconditionally on ARM64.
         * The 32 bit ARM platforms do not expect the MMU to be turned
         * off, so we leave them alone. (This whole function is only
         * compiled for CONFIG_ARCH_AARCH64, so no inner guard needed.) */
        disable_mmu_caches_hyp();

#if (defined(CONFIG_ARCH_ARM_V7A) || defined(CONFIG_ARCH_ARM_V8A)) && !defined(CONFIG_ARM_HYPERVISOR_SUPPORT)
        extern void leave_hyp(void);
        /* Switch to EL1, assume EL2 MMU already disabled for ARMv8. */
        leave_hyp();
#endif
    }

    /* Setup MMU. */
    if (is_hyp_mode()) {
        init_hyp_boot_vspace(&kernel_info);
    } else {
        /* If we are not in HYP mode, we enable the SV MMU and paging
         * just in case the kernel does not support hyp mode. */
        init_boot_vspace(&kernel_info);
    }

#if CONFIG_MAX_NUM_NODES > 1
    smp_boot();
#endif /* CONFIG_MAX_NUM_NODES */

    if (is_hyp_mode()) {
        printf("Enabling hypervisor MMU and paging\n");
        arm_enable_hyp_mmu();
    } else {
        printf("Enabling MMU and paging\n");
        arm_enable_mmu();
    }

    /* Enter kernel. The UART may no longer be accessible here. */
    if ((uintptr_t)uart_get_mmio() < kernel_info.virt_region_start) {
        printf("Jumping to kernel-image entry point...\n\n");
    }

    ((init_arm_kernel_t)kernel_info.virt_entry)(user_info.phys_region_start,
                                                user_info.phys_region_end,
                                                user_info.phys_virt_offset,
                                                user_info.virt_entry,
                                                (word_t)dtb,
                                                dtb_size);

    /* We should never get here. */
    printf("ERROR: Kernel returned back to the ELF Loader\n");
    abort();
}
#else
void continue_boot(int was_relocated)
{
if (was_relocated) {
Expand Down Expand Up @@ -232,3 +318,4 @@ void continue_boot(int was_relocated)
printf("ERROR: Kernel returned back to the ELF Loader\n");
abort();
}
#endif
1 change: 1 addition & 0 deletions elfloader-tool/src/binaries/efi/gnuefi/elf_aarch64_efi.lds
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,7 @@ SECTIONS
.dynstr : { *(.dynstr) }
. = ALIGN(4096);
.note.gnu.build-id : { *(.note.gnu.build-id) }
_end = .;
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This should be part of commit dea6924 I think.

/DISCARD/ :
{
*(.rel.reloc)
Expand Down
Loading