diff --git a/arch/arm64/src/common/arm64_internal.h b/arch/arm64/src/common/arm64_internal.h
index 35e51575bdb81..19c8b9eb69f64 100644
--- a/arch/arm64/src/common/arm64_internal.h
+++ b/arch/arm64/src/common/arm64_internal.h
@@ -270,6 +270,8 @@ EXTERN uint8_t g_idle_topstack[]; /* End+1 of heap */
  ****************************************************************************/
 
 void arm64_new_task(struct tcb_s *tak_new);
+void arm64_jump_to_user(uint64_t entry, uint64_t x0, uint64_t x1,
+                        uint64_t *regs) noreturn_function;
 
 /* Low level initialization provided by chip logic */
 
diff --git a/arch/arm64/src/common/arm64_pthread_start.c b/arch/arm64/src/common/arm64_pthread_start.c
index db80db228b630..8132884cf8394 100644
--- a/arch/arm64/src/common/arm64_pthread_start.c
+++ b/arch/arm64/src/common/arm64_pthread_start.c
@@ -68,12 +68,6 @@ void up_pthread_start(pthread_trampoline_t startup,
                       pthread_startroutine_t entrypt, pthread_addr_t arg)
 {
-  uint64_t *regs = this_task()->xcp.initregs;
-
-  /* This must be performed atomically, the C-section ends upon user entry */
-
-  enter_critical_section();
-
   /* Set up to enter the user-space pthread start-up function in
    * unprivileged mode.  We need:
    *
@@ -83,24 +77,8 @@ void up_pthread_start(pthread_trampoline_t startup,
    * SPSR = user mode
    */
 
-  regs[REG_ELR]  = (uint64_t)startup;
-  regs[REG_X0]   = (uint64_t)entrypt;
-  regs[REG_X1]   = (uint64_t)arg;
-  regs[REG_SPSR] = (regs[REG_SPSR] & ~SPSR_MODE_MASK) | SPSR_MODE_EL0T;
-
-  /* Fully unwind the kernel stack and drop to user space */
-
-  asm
-  (
-    "mov x0, %0\n" /* Get context registers */
-    "mov sp, x0\n" /* Stack pointer = context */
-    "b arm64_exit_exception\n"
-    :
-    : "r" (regs)
-    : "x0", "memory"
-  );
-
-  PANIC();
+  arm64_jump_to_user((uint64_t)startup, (uint64_t)entrypt, (uint64_t)arg,
+                     this_task()->xcp.initregs);
 }
 
 #endif /* !CONFIG_BUILD_FLAT && __KERNEL__ && !CONFIG_DISABLE_PTHREAD */
diff --git a/arch/arm64/src/common/arm64_task_start.c b/arch/arm64/src/common/arm64_task_start.c
index 5e3501eee6f52..0d20f693e5a04 100644
--- a/arch/arm64/src/common/arm64_task_start.c
+++ b/arch/arm64/src/common/arm64_task_start.c
@@ -65,12 +65,6 @@ void up_task_start(main_t taskentry, int argc, char *argv[])
 {
-  uint64_t *regs = this_task()->xcp.initregs;
-
-  /* This must be performed atomically, the C-section ends upon user entry */
-
-  enter_critical_section();
-
   /* Set up to return to the user-space _start function in
    * unprivileged mode.  We need:
    *
@@ -80,24 +74,8 @@ void up_task_start(main_t taskentry, int argc, char *argv[])
    * SPSR = user mode
    */
 
-  regs[REG_ELR]  = (uint64_t)taskentry;
-  regs[REG_X0]   = (uint64_t)argc;
-  regs[REG_X1]   = (uint64_t)argv;
-  regs[REG_SPSR] = (regs[REG_SPSR] & ~SPSR_MODE_MASK) | SPSR_MODE_EL0T;
-
-  /* Fully unwind the kernel stack and drop to user space */
-
-  asm
-  (
-    "mov x0, %0\n" /* Get context registers */
-    "mov sp, x0\n" /* Stack pointer = context */
-    "b arm64_exit_exception\n"
-    :
-    : "r" (regs)
-    : "x0", "memory"
-  );
-
-  PANIC();
+  arm64_jump_to_user((uint64_t)taskentry, (uint64_t)argc, (uint64_t)argv,
+                     this_task()->xcp.initregs);
 }
 
 #endif /* !CONFIG_BUILD_FLAT */
diff --git a/arch/arm64/src/common/arm64_vectors.S b/arch/arm64/src/common/arm64_vectors.S
index 16f75106b2095..d2d1e5c95a309 100644
--- a/arch/arm64/src/common/arm64_vectors.S
+++ b/arch/arm64/src/common/arm64_vectors.S
@@ -156,6 +156,36 @@ restore_new:
 
     ret
 
+/****************************************************************************
+ * Function: arm64_jump_to_user
+ *
+ * Description:
+ *   Drop to user space.  Called when a user process is started and the
+ *   kernel is ready to hand control to the user task in EL0.
+ *
+ *   arm64_jump_to_user(entry, x0, x1, regs)
+ *     entry: process entry point
+ *     x0:    parameter 0 for process
+ *     x1:    parameter 1 for process
+ *     regs:  integer register save area to use
+ *
+ ****************************************************************************/
+
+#ifndef CONFIG_BUILD_FLAT
+GTEXT(arm64_jump_to_user)
+SECTION_FUNC(text, arm64_jump_to_user)
+    msr    daifset, #IRQ_DAIF_MASK    /* Mask interrupts (critical section) */
+    mov    sp, x3                     /* SP = context register save area */
+    str    x0, [sp, #8 * REG_ELR]     /* ELR = user entry point */
+    str    x1, [sp, #8 * REG_X0]      /* X0  = first user argument */
+    str    x2, [sp, #8 * REG_X1]      /* X1  = second user argument */
+    mrs    x0, spsr_el1               /* Build SPSR with mode = EL0T */
+    and    x0, x0, #~SPSR_MODE_MASK
+    /* orr x0, x0, #SPSR_MODE_EL0T      EL0T is 0x00, not encodable for orr */
+    str    x0, [sp, #8 * REG_SPSR]    /* SPSR = user mode */
+    b      arm64_exit_exception       /* Restore context and eret to EL0 */
+#endif
+
 /****************************************************************************
  * Function: arm64_sync_exc
  *
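
---

Reviewer note, not part of the patch: a C-level sketch of what the new routine does, for comparison against the removed code. It assumes NuttX's REG_* indices into the uint64_t context frame (byte offset = 8 * index, matching the "str xN, [sp, #8 * REG_*]" stores above) and the read_sysreg() helper from arm64_arch.h; the function name is hypothetical. The stack switch and the branch to arm64_exit_exception must stay in assembly, which is why the real routine cannot be written in C at all.

  /* Sketch only: mirrors what arm64_jump_to_user does to the register
   * save area before unwinding the kernel stack.  Not buildable as a
   * drop-in replacement.
   */

  static void arm64_jump_to_user_sketch(uint64_t entry, uint64_t x0,
                                        uint64_t x1, uint64_t *regs)
  {
    up_irq_save();                   /* msr daifset, #IRQ_DAIF_MASK */

    regs[REG_ELR] = entry;           /* Target of the final eret    */
    regs[REG_X0]  = x0;              /* First user argument         */
    regs[REG_X1]  = x1;              /* Second user argument        */

    /* Clear the SPSR mode field; EL0T is 0x00, so no orr is needed
     * (and 0x00 is not an encodable logical immediate for orr anyway).
     */

    regs[REG_SPSR] = read_sysreg(spsr_el1) & ~SPSR_MODE_MASK;

    /* Assembly tail: mov sp, regs; b arm64_exit_exception */
  }

One behavioral difference worth flagging: the removed C code masked the mode bits of the SPSR already saved in regs, while the new routine rebuilds the value from the live spsr_el1. Both paths end with the mode forced to SPSR_MODE_EL0T, and both enter the critical section before touching the frame, so the user entry itself is unchanged.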