Commit 9b79cb0

Merge remote-tracking branch 'stable/linux-6.1.y' into rpi-6.1.y
popcornmix committed Aug 31, 2023
2 parents 9d9586d + a2943d2 commit 9b79cb0
Showing 176 changed files with 1,584 additions and 1,137 deletions.
4 changes: 2 additions & 2 deletions Documentation/admin-guide/hw-vuln/srso.rst
@@ -124,8 +124,8 @@ sequence.
 To ensure the safety of this mitigation, the kernel must ensure that the
 safe return sequence is itself free from attacker interference. In Zen3
 and Zen4, this is accomplished by creating a BTB alias between the
-untraining function srso_untrain_ret_alias() and the safe return
-function srso_safe_ret_alias() which results in evicting a potentially
+untraining function srso_alias_untrain_ret() and the safe return
+function srso_alias_safe_ret() which results in evicting a potentially
 poisoned BTB entry and using that safe one for all function returns.

 In older Zen1 and Zen2, this is accomplished using a reinterpretation
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -6066,7 +6066,7 @@ S: Supported
 F: Documentation/networking/devlink
 F: include/net/devlink.h
 F: include/uapi/linux/devlink.h
-F: net/core/devlink.c
+F: net/devlink/

 DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
 M: Christoph Niedermaier <[email protected]>
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 47
+SUBLEVEL = 50
 EXTRAVERSION =
 NAME = Curry Ramen

21 changes: 19 additions & 2 deletions arch/mips/include/asm/cpu-features.h
@@ -121,7 +121,24 @@
 #define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE)
 #endif
 #ifndef cpu_has_octeon_cache
-#define cpu_has_octeon_cache 0
+#define cpu_has_octeon_cache \
+({ \
+        int __res; \
+ \
+        switch (boot_cpu_type()) { \
+        case CPU_CAVIUM_OCTEON: \
+        case CPU_CAVIUM_OCTEON_PLUS: \
+        case CPU_CAVIUM_OCTEON2: \
+        case CPU_CAVIUM_OCTEON3: \
+                __res = 1; \
+                break; \
+ \
+        default: \
+                __res = 0; \
+        } \
+ \
+        __res; \
+})
 #endif
 /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
 #ifndef cpu_has_fpu
@@ -351,7 +368,7 @@
 ({ \
         int __res; \
 \
-        switch (current_cpu_type()) { \
+        switch (boot_cpu_type()) { \
         case CPU_M14KC: \
         case CPU_74K: \
         case CPU_1074K: \
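
The rewritten cpu_has_octeon_cache relies on GCC's statement-expression extension: the ({ ... }) block can run a switch yet still yields a value, so the macro keeps working anywhere an integer expression is expected, and keying both switches off boot_cpu_type() makes the answer uniform across cores. A minimal standalone sketch of the idiom, with a hypothetical boot_cpu_type() stub standing in for the kernel's:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's CPU type constants. */
enum cpu_type { CPU_OTHER, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON2 };

static enum cpu_type boot_cpu_type(void)
{
        return CPU_CAVIUM_OCTEON2;      /* pretend the boot CPU is an Octeon II */
}

/*
 * GNU statement expression: the value of the last statement (__res;)
 * becomes the value of the whole macro.
 */
#define cpu_has_octeon_cache                    \
({                                              \
        int __res;                              \
                                                \
        switch (boot_cpu_type()) {              \
        case CPU_CAVIUM_OCTEON:                 \
        case CPU_CAVIUM_OCTEON2:                \
                __res = 1;                      \
                break;                          \
        default:                                \
                __res = 0;                      \
        }                                       \
        __res;                                  \
})

int main(void)
{
        printf("octeon cache ops: %s\n", cpu_has_octeon_cache ? "yes" : "no");
        return 0;
}
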
28 changes: 17 additions & 11 deletions arch/riscv/Kconfig
@@ -447,24 +447,30 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE
 config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
         def_bool y
         # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
-        depends on AS_IS_GNU && AS_VERSION >= 23800
+        # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd
+        depends on AS_IS_GNU && AS_VERSION >= 23600
         help
-          Newer binutils versions default to ISA spec version 20191213 which
-          moves some instructions from the I extension to the Zicsr and Zifencei
-          extensions.
+          Binutils-2.38 and GCC-12.1.0 bumped the default ISA spec to the newer
+          20191213 version, which moves some instructions from the I extension to
+          the Zicsr and Zifencei extensions. This requires explicitly specifying
+          Zicsr and Zifencei when binutils >= 2.38 or GCC >= 12.1.0. Zicsr
+          and Zifencei are supported in binutils from version 2.36 onwards.
+          To make life easier, and avoid forcing toolchains that default to a
+          newer ISA spec to version 2.2, relax the check to binutils >= 2.36.
+          For clang < 17 or GCC < 11.3.0, for which this is not possible or need
+          special treatment, this is dealt with in TOOLCHAIN_NEEDS_OLD_ISA_SPEC.

 config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
         def_bool y
         depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
         # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
-        depends on CC_IS_CLANG && CLANG_VERSION < 170000
+        # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671
+        depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110300)
         help
-          Certain versions of clang do not support zicsr and zifencei via -march
-          but newer versions of binutils require it for the reasons noted in the
-          help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
-          option causes an older ISA spec compatible with these older versions
-          of clang to be passed to GAS, which has the same result as passing zicsr
-          and zifencei to -march.
+          Certain versions of clang and GCC do not support zicsr and zifencei via
+          -march. This option causes an older ISA spec compatible with these older
+          versions of clang and GCC to be passed to GAS, which has the same result
+          as passing zicsr and zifencei to -march.

 config FPU
         bool "FPU support"
8 changes: 7 additions & 1 deletion arch/riscv/kernel/compat_vdso/Makefile
@@ -11,7 +11,13 @@ compat_vdso-syms += flush_icache
 COMPAT_CC := $(CC)
 COMPAT_LD := $(LD)

-COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+# binutils 2.35 does not support the zifencei extension, but in the ISA
+# spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI.
+ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32
+else
+COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32
+endif
 COMPAT_LD_FLAGS := -melf32lriscv

 # Disable attributes, as they're useless and break the build.
1 change: 1 addition & 0 deletions arch/x86/include/asm/entry-common.h
@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 static __always_inline void arch_exit_to_user_mode(void)
 {
         mds_user_clear_cpu_buffers();
+        amd_clear_divider();
 }
 #define arch_exit_to_user_mode arch_exit_to_user_mode

28 changes: 17 additions & 11 deletions arch/x86/include/asm/nospec-branch.h
@@ -168,17 +168,17 @@
 .endm

 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret"
+#define CALL_UNTRAIN_RET "call entry_untrain_ret"
 #else
-#define CALL_ZEN_UNTRAIN_RET ""
+#define CALL_UNTRAIN_RET ""
 #endif

 /*
  * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
  * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
@@ -189,14 +189,9 @@
         defined(CONFIG_CPU_SRSO)
         ANNOTATE_UNRET_END
         ALTERNATIVE_2 "", \
-                      CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
+                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
                       "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
 #endif
-
-#ifdef CONFIG_CPU_SRSO
-        ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
-                          "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
-#endif
 .endm

 #else /* __ASSEMBLY__ */
@@ -210,10 +205,21 @@
 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
 extern retpoline_thunk_t __x86_indirect_thunk_array[];

+#ifdef CONFIG_RETHUNK
 extern void __x86_return_thunk(void);
-extern void zen_untrain_ret(void);
+#else
+static inline void __x86_return_thunk(void) {}
+#endif
+
+extern void retbleed_return_thunk(void);
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+
+extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
-extern void srso_untrain_ret_alias(void);
+extern void srso_alias_untrain_ret(void);
+
+extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);

 #ifdef CONFIG_RETPOLINE
1 change: 1 addition & 0 deletions arch/x86/kernel/cpu/amd.c
@@ -1295,3 +1295,4 @@ void noinstr amd_clear_divider(void)
         asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
                      :: "a" (0), "d" (0), "r" (1));
 }
+EXPORT_SYMBOL_GPL(amd_clear_divider);
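
For context: amd_clear_divider() executes one throwaway DIV (a zeroed edx:eax divided by 1), patched in via ALTERNATIVE only on parts with X86_BUG_DIV0, so stale quotient state cannot be observed afterwards; the new export lets modular code such as KVM call it. A rough userspace illustration of that dummy divide (x86-only inline asm, a hedged sketch rather than the kernel's patched sequence):

#include <stdio.h>

static void clear_divider_sketch(void)
{
        unsigned int lo = 0, hi = 0;    /* edx:eax = 0 */

        /* DIV r32: divides edx:eax by the operand; 0 / 1 = 0 remainder 0. */
        asm volatile("div %2"
                     : "+a" (lo), "+d" (hi)
                     : "r" (1U));
}

int main(void)
{
        clear_divider_sketch();
        puts("dummy divide executed");
        return 0;
}
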
28 changes: 23 additions & 5 deletions arch/x86/kernel/cpu/bugs.c
@@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);

 static DEFINE_MUTEX(spec_ctrl_mutex);

+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {
@@ -164,8 +166,13 @@ void __init cpu_select_mitigations(void)
         md_clear_select_mitigation();
         srbds_select_mitigation();
         l1d_flush_select_mitigation();
-        gds_select_mitigation();
+
+        /*
+         * srso_select_mitigation() depends and must run after
+         * retbleed_select_mitigation().
+         */
         srso_select_mitigation();
+        gds_select_mitigation();
 }

 /*
@@ -1013,6 +1020,9 @@ static void __init retbleed_select_mitigation(void)
         setup_force_cpu_cap(X86_FEATURE_RETHUNK);
         setup_force_cpu_cap(X86_FEATURE_UNRET);

+        if (IS_ENABLED(CONFIG_RETHUNK))
+                x86_return_thunk = retbleed_return_thunk;
+
         if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
             boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                 pr_err(RETBLEED_UNTRAIN_MSG);
@@ -2388,9 +2398,10 @@ static void __init srso_select_mitigation(void)
          * Zen1/2 with SMT off aren't vulnerable after the right
          * IBPB microcode has been applied.
          */
-        if ((boot_cpu_data.x86 < 0x19) &&
-            (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
+        if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
                 setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+                return;
+        }
         }

         if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
@@ -2419,11 +2430,15 @@ static void __init srso_select_mitigation(void)
          * like ftrace, static_call, etc.
          */
         setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+        setup_force_cpu_cap(X86_FEATURE_UNRET);

-        if (boot_cpu_data.x86 == 0x19)
+        if (boot_cpu_data.x86 == 0x19) {
                 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-        else
+                x86_return_thunk = srso_alias_return_thunk;
+        } else {
                 setup_force_cpu_cap(X86_FEATURE_SRSO);
+                x86_return_thunk = srso_return_thunk;
+        }
         srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 } else {
         pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
@@ -2672,6 +2687,9 @@ static ssize_t gds_show_state(char *buf)

 static ssize_t srso_show_state(char *buf)
 {
+        if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+                return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+
         return sysfs_emit(buf, "%s%s\n",
                           srso_strings[srso_mitigation],
                           (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
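
The new x86_return_thunk pointer gives bugs.c one place to decide, at boot, where every patched return lands: retbleed_return_thunk for classic retbleed, srso_return_thunk for Zen1/2 SRSO, srso_alias_return_thunk for Zen3/4. A toy C sketch of that selection pattern; the thunk names mirror the kernel's, but the harness around them is invented:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins; the kernel's thunks are assembly stubs. */
static void retbleed_return_thunk(void)   { puts("retbleed thunk"); }
static void srso_return_thunk(void)       { puts("srso thunk (Zen1/2)"); }
static void srso_alias_return_thunk(void) { puts("srso alias thunk (Zen3/4)"); }
static void plain_return(void)            { puts("plain ret"); }

/* Written once during mitigation selection (__ro_after_init in the kernel). */
static void (*x86_return_thunk)(void) = plain_return;

static void select_mitigation(bool srso, int family)
{
        if (!srso)
                x86_return_thunk = retbleed_return_thunk;
        else if (family == 0x19)
                x86_return_thunk = srso_alias_return_thunk;
        else
                x86_return_thunk = srso_return_thunk;
}

int main(void)
{
        select_mitigation(true, 0x19);
        x86_return_thunk();     /* every patched return would land here */
        return 0;
}
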
3 changes: 1 addition & 2 deletions arch/x86/kernel/fpu/context.h
@@ -19,8 +19,7 @@
  * FPU state for a task MUST let the rest of the kernel know that the
  * FPU registers are no longer valid for this task.
  *
- * Either one of these invalidation functions is enough. Invalidate
- * a resource you control: CPU if using the CPU for something else
+ * Invalidate a resource you control: CPU if using the CPU for something else
  * (with preemption disabled), FPU for the current task, or a task that
  * is prevented from running by the current task.
  */
2 changes: 1 addition & 1 deletion arch/x86/kernel/fpu/core.c
@@ -679,7 +679,7 @@ static void fpu_reset_fpregs(void)
         struct fpu *fpu = &current->thread.fpu;

         fpregs_lock();
-        fpu__drop(fpu);
+        __fpu_invalidate_fpregs_state(fpu);
         /*
          * This does not change the actual hardware registers. It just
          * resets the memory image and sets TIF_NEED_FPU_LOAD so a
7 changes: 7 additions & 0 deletions arch/x86/kernel/fpu/xstate.c
@@ -882,6 +882,13 @@ void __init fpu__init_system_xstate(unsigned int legacy_size)
                 goto out_disable;
         }

+        /*
+         * CPU capabilities initialization runs before FPU init. So
+         * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
+         * functional, set the feature bit so depending code works.
+         */
+        setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
+
         print_xstate_offset_size();
         pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
                 fpu_kernel_cfg.max_features,
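
The forced X86_FEATURE_OSXSAVE mirrors what CPUID reports once the kernel has set CR4.OSXSAVE. A userspace analogue checking the same architectural bit (CPUID leaf 1, ECX bit 27), shown only as an illustration:

#include <stdio.h>
#include <cpuid.h>      /* GCC/clang wrapper for the CPUID instruction */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        /* CPUID.1:ECX.OSXSAVE[bit 27], set once the OS enables CR4.OSXSAVE. */
        if (ecx & (1u << 27))
                printf("OSXSAVE enabled: xstate is usable\n");
        else
                printf("OSXSAVE not enabled by the OS\n");
        return 0;
}
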
13 changes: 13 additions & 0 deletions arch/x86/kernel/static_call.c
@@ -184,6 +184,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
  */
 bool __static_call_fixup(void *tramp, u8 op, void *dest)
 {
+        unsigned long addr = (unsigned long)tramp;
+        /*
+         * Not all .return_sites are a static_call trampoline (most are not).
+         * Check if the 3 bytes after the return are still kernel text, if not,
+         * then this definitely is not a trampoline and we need not worry
+         * further.
+         *
+         * This avoids the memcmp() below tripping over pagefaults etc..
+         */
+        if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
+            !kernel_text_address(addr + 7))
+                return false;
+
         if (memcmp(tramp+5, tramp_ud, 3)) {
                 /* Not a trampoline site, not our problem. */
                 return false;
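
The guard works because the caller has already touched the byte at tramp, so if tramp..tramp+7 sit in one page the later memcmp() cannot fault; only when the 8 bytes straddle a page boundary does the next page need checking via kernel_text_address(). A self-contained sketch of the page-crossing test (PAGE_SHIFT of 12 assumed, as with 4K pages on x86):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages */

/* True if the 8 bytes at addr..addr+7 do not all sit in one page. */
static bool crosses_page(unsigned long addr)
{
        return (addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT);
}

int main(void)
{
        printf("%d\n", crosses_page(0x1000UL));  /* 0: 0x1000..0x1007, one page */
        printf("%d\n", crosses_page(0x1ffaUL));  /* 1: spans the 0x2000 boundary */
        return 0;
}
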
2 changes: 0 additions & 2 deletions arch/x86/kernel/traps.c
@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error)
 {
         do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
                       FPE_INTDIV, error_get_trap_addr(regs));
-
-        amd_clear_divider();
 }

 DEFINE_IDTENTRY(exc_overflow)
20 changes: 10 additions & 10 deletions arch/x86/kernel/vmlinux.lds.S
@@ -134,18 +134,18 @@ SECTIONS
                 KPROBES_TEXT
                 ALIGN_ENTRY_TEXT_BEGIN
 #ifdef CONFIG_CPU_SRSO
-                *(.text.__x86.rethunk_untrain)
+                *(.text..__x86.rethunk_untrain)
 #endif

                 ENTRY_TEXT

 #ifdef CONFIG_CPU_SRSO
                 /*
-                 * See the comment above srso_untrain_ret_alias()'s
+                 * See the comment above srso_alias_untrain_ret()'s
                  * definition.
                  */
-                . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
-                *(.text.__x86.rethunk_safe)
+                . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+                *(.text..__x86.rethunk_safe)
 #endif
                 ALIGN_ENTRY_TEXT_END
                 SOFTIRQENTRY_TEXT
@@ -154,8 +154,8 @@

 #ifdef CONFIG_RETPOLINE
                 __indirect_thunk_start = .;
-                *(.text.__x86.indirect_thunk)
-                *(.text.__x86.return_thunk)
+                *(.text..__x86.indirect_thunk)
+                *(.text..__x86.return_thunk)
                 __indirect_thunk_end = .;
 #endif
         } :text =0xcccc
@@ -507,8 +507,8 @@ INIT_PER_CPU(irq_stack_backing_store);
            "fixed_percpu_data is not at start of per-cpu area");
 #endif

-#ifdef CONFIG_RETHUNK
-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
+#ifdef CONFIG_RETHUNK
+. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif

@@ -523,8 +523,8 @@ INIT_PER_CPU(irq_stack_backing_store);
  * Instead do: (A | B) - (A & B) in order to compute the XOR
  * of the two function addresses:
  */
-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
-           (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
+           (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
            "SRSO function pair won't alias");
 #endif

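
The assertion leans on the identity (A | B) - (A & B) == A ^ B: OR collects every set bit, AND keeps the shared ones, and because those two bit sets are disjoint the subtraction leaves exactly the differing bits. A quick C check with invented addresses that differ only in bits 2, 8, 14 and 20:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned long mask = (1UL << 2) | (1UL << 8) | (1UL << 14) | (1UL << 20);
        /* Hypothetical link addresses for the SRSO function pair. */
        unsigned long untrain = 0xffffffff81400040UL;
        unsigned long safe = untrain | mask;

        /* (A | B) - (A & B) is how the linker script spells A ^ B. */
        assert(((untrain | safe) - (untrain & safe)) == (untrain ^ safe));
        assert((untrain ^ safe) == mask);

        printf("pair differs exactly in the alias bits: 0x%lx\n", untrain ^ safe);
        return 0;
}
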
3 changes: 2 additions & 1 deletion arch/x86/kvm/mmu/mmu.c
@@ -4212,7 +4212,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
  * root was invalidated by a memslot update or a relevant mmu_notifier fired.
  */
 static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
-                                struct kvm_page_fault *fault, int mmu_seq)
+                                struct kvm_page_fault *fault,
+                                unsigned long mmu_seq)
 {
         struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);

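
The widening matters because mmu_notifier sequence counts are unsigned long: squeezing one through an int drops the upper 32 bits on 64-bit kernels, so the staleness comparison can be answered from a mangled value. A contrived demonstration of the truncation (values invented):

#include <stdio.h>

/* Narrow parameter: the snapshot already lost its upper 32 bits. */
static int is_stale_narrow(unsigned long current_seq, int snapshot)
{
        return current_seq != (unsigned long)snapshot;
}

static int is_stale_wide(unsigned long current_seq, unsigned long snapshot)
{
        return current_seq != snapshot;
}

int main(void)
{
        unsigned long seq = 0x100000000UL;      /* counter has crossed 2^32 */

        /* Both sides saw the same value, so the fault is not stale. */
        printf("narrow: stale=%d (wrong)\n", is_stale_narrow(seq, (int)seq));
        printf("wide:   stale=%d (right)\n", is_stale_wide(seq, seq));
        return 0;
}
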
[Diff truncated: the remaining changed files were not loaded.]
