Skip to content

Commit

Permalink
head.S: make state on entry to the kernel similar to that used by TXT
Browse files Browse the repository at this point in the history
Intel TXT provides the physical address of the kernel entry point in
%ebx. This is because %ss is undefined at that point, so the kernel
cannot use a call/pop pair to obtain its own load address. Even though
that technique is possible on AMD with this implementation of SKL, keep
things consistent and pass the entry point address in %ebx as well.

%ebp points to the base of the SLB; the kernel can use it to obtain the
offset to the SLRT, and through that, the bootloader context and the
payload argument saved within.

Note that this commit (temporarily) breaks booting to non-Linux
payloads.

Signed-off-by: Krystian Hebel <[email protected]>
  • Loading branch information
krystian-hebel committed Nov 7, 2024
1 parent 1cc0a75 commit 3f17a05
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 49 deletions.
43 changes: 15 additions & 28 deletions head.S
Original file line number Diff line number Diff line change
Expand Up @@ -190,16 +190,23 @@ GLOBAL(_entry)
call skl_main

/*
* skl_main() is magic. It returns two pointers by register:
* skl_main() returns a pointer to protected mode kernel entry in %eax. It
* could also return the argument for the kernel (depending on kernel type,
* this could be either Linux boot parameters or MBI for Multiboot2), but
* for parity with what Intel TXT does, this isn't the case.
*
* %eax - protected mode kernel entry
* %edx - argument for kernel entry point, depends on type of kernel
* Intel TXT provides physical address of kernel entry point in %ebx. This
* is due to the fact that %ss is undefined there, so the kernel can't use
* the call/pop pair to obtain its load address. Even though it is possible
* on AMD with this implementation of SKL, keep things consistent and move
* the entry point address to %ebx as well.
*
* We stash the entry point in %edi and the argument in %esi to protect
* them from clobbering during teardown.
* %ebp points to base of SLB, it was set by the first instruction on SKL
* entry and preserved across call to C. This is how the kernel can obtain
* offset to SLRT, and through it, bootloader context and payload argument
* saved within.
*/
mov %eax, %edi
mov %edx, %esi
mov %eax, %ebx

#ifdef __x86_64__

Expand Down Expand Up @@ -232,28 +239,8 @@ GLOBAL(_entry)
push $0
popf

/*
* Various kernels use different boot protocols, SKL supports some of
* the common ones. Because of that, we are saving the same argument in
* every possible place that any of the supported kernel types may look
* for it. As of now, supported protocols include:
*
* - Linux x86 protected mode entry, not UEFI
* - Multiboot2, also not UEFI
* - simple payload started as 'entry(u32 arg)' function call. As we
* don't expect it to return, __cdecl, __stdcall and __pascal calling
* conventions work the same.
*/
/* Linux expects Zero Page address in %esi, it is already there */
/* Multiboot2 expects MBI address in %ebx and magic number in %eax */
mov %esi, %ebx
mov $MULTIBOOT2_BOOTLOADER_MAGIC, %eax
/* Simple payload expects argument on stack followed by return address */
push %esi
push $0

/* All set, jump to the kernel */
jmp *%edi
jmp *%ebx
ENDFUNC(_entry)

.section .rodata, "a", @progbits
Expand Down
29 changes: 8 additions & 21 deletions main.c
Original file line number Diff line number Diff line change
Expand Up @@ -231,27 +231,15 @@ static void dma_protection_setup(void)
#endif
}

/*
* Function return ABI magic:
*
* By returning a simple object of two pointers, the SYSV ABI splits it across
* %rax and %rdx rather than spilling it to the stack. This is far more
* convenient for our asm caller to deal with.
*/
typedef struct {
void *dlme_entry; /* %eax */
void *dlme_arg; /* %edx */
} asm_return_t;

asm_return_t skl_main(void)
void *skl_main(void)
{
struct tpm *tpm;
struct slr_entry_dl_info *dl_info;
asm_return_t ret;
void *dlme_entry;
u32 entry_offset;

/*
* Now in 64b mode, paging is setup. This is the launching point. We can
* Now in 64b mode, paging is set up. This is the launching point. We can
* now do what we want. At the end, trampoline to the PM entry point which
* will include the Secure Launch stub.
*/
Expand Down Expand Up @@ -298,20 +286,19 @@ asm_return_t skl_main(void)
tpm_relinquish_locality(tpm);
free_tpm(tpm);

ret.dlme_entry = _p(dl_info->dlme_base + dl_info->dlme_entry);
ret.dlme_arg = _p(dl_info->bl_context.context);
dlme_entry = _p(dl_info->dlme_base + dl_info->dlme_entry);

/* End of the line, off to the protected mode entry into the kernel */
print("dlme_entry:\n");
hexdump(ret.dlme_entry, 0x100);
print("dlme_arg:\n");
hexdump(ret.dlme_arg, 0x280);
hexdump(dlme_entry, 0x100);
print("bl_context:\n");
hexdump(_p(dl_info->bl_context.context), 0x280);
print("skl_base:\n");
hexdump(_start, 0x100);
print("bootloader_data:\n");
hexdump(&bootloader_data, bootloader_data.size);

print("skl_main() is about to exit\n");

return ret;
return dlme_entry;
}

0 comments on commit 3f17a05

Please sign in to comment.