Merge tag 'v6.12.6' into HEAD
 * Linux 6.12.6

Signed-off-by: Yang Jeong Hun <[email protected]>
Nevuly committed Dec 20, 2024
2 parents 0e43068 + e9d65b4 commit 4bc72b4
Showing 182 changed files with 2,197 additions and 1,291 deletions.
6 changes: 6 additions & 0 deletions Documentation/networking/ip-sysctl.rst
@@ -2170,6 +2170,12 @@ nexthop_compat_mode - BOOLEAN
understands the new API, this sysctl can be disabled to achieve full
performance benefits of the new API by disabling the nexthop expansion
and extraneous notifications.

Note that, as a backward-compatible mode, dumping of modern features
might be incomplete or wrong. For example, resilient groups will not be
shown as such, but rather as a plain list of next hops; likewise, weights
that do not fit into 8 bits will be shown incorrectly.

Default: true (backward compat mode)

fib_notify_on_flag_change - INTEGER
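
A quick way to exercise this knob from userspace (illustrative only, not part of the patch; it assumes the conventional /proc/sys/net/ipv4/nexthop_compat_mode path and sufficient privileges):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv4/nexthop_compat_mode", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* "0" turns off the legacy nexthop expansion once every
		 * routing daemon on the host speaks the new API. */
		fputs("0\n", f);
		return fclose(f) ? 1 : 0;
	}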
4 changes: 3 additions & 1 deletion Documentation/power/runtime_pm.rst
@@ -347,7 +347,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:

`int pm_runtime_resume_and_get(struct device *dev);`
- run pm_runtime_resume(dev) and if successful, increment the device's
usage counter; return the result of pm_runtime_resume
usage counter; returns 0 on success (whether or not the device's
runtime PM status was already 'active') or the error code from
pm_runtime_resume() on failure.

`int pm_request_idle(struct device *dev);`
- submit a request to execute the subsystem-level idle callback for the
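
The clarified return-value contract matters for the usual driver pattern; a minimal sketch (function and device names invented for illustration):

	#include <linux/pm_runtime.h>

	static int foo_do_transfer(struct device *dev)
	{
		int ret;

		ret = pm_runtime_resume_and_get(dev);
		if (ret < 0)
			return ret;	/* usage counter was NOT incremented */

		/* ... access the now-active device ... */

		pm_runtime_put(dev);	/* balance the counter taken above */
		return 0;
	}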
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 12
SUBLEVEL = 5
SUBLEVEL = 6
EXTRAVERSION =
NAME = Baby Opossum Posse

55 changes: 52 additions & 3 deletions arch/arm64/kvm/sys_regs.c
@@ -1535,6 +1535,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
break;
case SYS_ID_AA64PFR2_EL1:
/* We only expose FPMR */
@@ -1724,6 +1725,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,

val &= ~ID_AA64PFR0_EL1_AMU_MASK;

/*
* MPAM is disabled by default as KVM also needs a set of PARTID to
* program the MPAMVPMx_EL2 PARTID remapping registers with. But some
* older kernels let the guest see the ID bit.
*/
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

return val;
}

@@ -1834,6 +1842,42 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
return set_id_reg(vcpu, rd, val);
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;

/*
* Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
* in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
* guests, but didn't add trap handling. KVM doesn't support MPAM and
* always returns an UNDEF for these registers. The guest must see 0
* for this field.
*
* But KVM must also accept values from user-space that were provided
* by KVM. On CPUs that support MPAM, permit user-space to write
* the sanitized value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
*/
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

return set_id_reg(vcpu, rd, user_val);
}

static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd, u64 user_val)
{
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;

/* See set_id_aa64pfr0_el1 for comment about MPAM */
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;

return set_id_reg(vcpu, rd, user_val);
}

/*
* cpufeature ID register user accessors
*
@@ -2377,15 +2421,20 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_reg,
.set_user = set_id_aa64pfr0_el1,
.reset = read_sanitised_id_aa64pfr0_el1,
.val = ~(ID_AA64PFR0_EL1_AMU |
ID_AA64PFR0_EL1_MPAM |
ID_AA64PFR0_EL1_SVE |
ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP), },
ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
.access = access_id_reg,
.get_user = get_id_reg,
.set_user = set_id_aa64pfr1_el1,
.reset = kvm_read_sanitised_id_reg,
.val = ~(ID_AA64PFR1_EL1_PFAR |
ID_AA64PFR1_EL1_DF2 |
ID_AA64PFR1_EL1_MTEX |
ID_AA64PFR1_EL1_THE |
@@ -2397,7 +2446,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR1_EL1_RES0 |
ID_AA64PFR1_EL1_MPAM_frac |
ID_AA64PFR1_EL1_RAS_frac |
ID_AA64PFR1_EL1_MTE)),
ID_AA64PFR1_EL1_MTE), },
ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
ID_UNALLOCATED(4,3),
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
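
Seen from the VMM side, the new set_user handlers mean a write of either 0 or the host's sanitised MPAM value is now accepted. A hypothetical userspace sketch (assuming the standard KVM one-reg API; ID_AA64PFR0_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=4, op2=0 and MPAM lives in bits [43:40]; error handling elided):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void clear_mpam_idreg(int vcpu_fd)
	{
		uint64_t val;
		struct kvm_one_reg reg = {
			.id   = ARM64_SYS_REG(3, 0, 0, 4, 0),	/* ID_AA64PFR0_EL1 */
			.addr = (uint64_t)&val,
		};

		ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
		val &= ~(0xfULL << 40);		/* clear MPAM, bits [43:40] */
		ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}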
4 changes: 3 additions & 1 deletion arch/riscv/include/asm/kfence.h
@@ -22,7 +22,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
else
set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));

flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
preempt_disable();
local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
preempt_enable();

return true;
}
2 changes: 1 addition & 1 deletion arch/riscv/kernel/setup.c
@@ -227,7 +227,7 @@ static void __init init_resources(void)
static void __init parse_dtb(void)
{
/* Early scan of device tree from init memory */
if (early_init_dt_scan(dtb_early_va, __pa(dtb_early_va))) {
if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
const char *name = of_flat_dt_get_machine_name();

if (name) {
7 changes: 4 additions & 3 deletions arch/riscv/mm/init.c
@@ -1566,7 +1566,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
pmd_clear(pmd);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemmap)
{
struct page *page = pud_page(*pud);
struct ptdesc *ptdesc = page_ptdesc(page);
@@ -1579,7 +1579,8 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
return;
}

pagetable_pmd_dtor(ptdesc);
if (!is_vmemmap)
pagetable_pmd_dtor(ptdesc);
if (PageReserved(page))
free_reserved_page(page);
else
@@ -1703,7 +1704,7 @@ static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, un
remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);

if (pgtable_l4_enabled)
free_pmd_table(pmd_base, pudp);
free_pmd_table(pmd_base, pudp, is_vmemmap);
}
}

2 changes: 1 addition & 1 deletion arch/x86/events/intel/ds.c
@@ -1468,7 +1468,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
* hence we need to drain when changing said
* size.
*/
intel_pmu_drain_large_pebs(cpuc);
intel_pmu_drain_pebs_buffer();
adaptive_pebs_record_size_update();
wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
cpuc->active_pebs_data_cfg = pebs_data_cfg;
2 changes: 2 additions & 0 deletions arch/x86/include/asm/processor.h
@@ -212,6 +212,8 @@ static inline unsigned long long l1tf_pfn_limit(void)
return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
}

void init_cpu_devs(void);
void get_cpu_vendor(struct cpuinfo_x86 *c);
extern void early_cpu_init(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
15 changes: 15 additions & 0 deletions arch/x86/include/asm/static_call.h
@@ -65,4 +65,19 @@

extern bool __static_call_fixup(void *tramp, u8 op, void *dest);

extern void __static_call_update_early(void *tramp, void *func);

#define static_call_update_early(name, _func) \
({ \
typeof(&STATIC_CALL_TRAMP(name)) __F = (_func); \
if (static_call_initialized) { \
__static_call_update(&STATIC_CALL_KEY(name), \
STATIC_CALL_TRAMP_ADDR(name), __F);\
} else { \
WRITE_ONCE(STATIC_CALL_KEY(name).func, _func); \
__static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
__F); \
} \
})

#endif /* _ASM_STATIC_CALL_H */
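
A minimal sketch of how the new early variant is meant to be used (names invented for illustration; the real consumer is the Xen hypercall rework below). Before static_call_init() has run, the trampoline is patched directly instead of going through __static_call_update():

	#include <linux/init.h>
	#include <linux/static_call.h>

	static void generic_impl(void) { /* safe fallback */ }
	static void fast_impl(void)    { /* CPU-specific path */ }

	DEFINE_STATIC_CALL(my_op, generic_impl);

	void __init early_pick_impl(void)
	{
		/* Safe both before and after static-call init; the macro
		 * above selects the right update mechanism at run time. */
		static_call_update_early(my_op, fast_impl);
	}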
6 changes: 3 additions & 3 deletions arch/x86/include/asm/sync_core.h
@@ -8,7 +8,7 @@
#include <asm/special_insns.h>

#ifdef CONFIG_X86_32
static inline void iret_to_self(void)
static __always_inline void iret_to_self(void)
{
asm volatile (
"pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
: ASM_CALL_CONSTRAINT : : "memory");
}
#else
static inline void iret_to_self(void)
static __always_inline void iret_to_self(void)
{
unsigned int tmp;

@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
* Like all of Linux's memory ordering operations, this is a
* compiler barrier as well.
*/
static inline void sync_core(void)
static __always_inline void sync_core(void)
{
/*
* The SERIALIZE instruction is the most straightforward way to
36 changes: 22 additions & 14 deletions arch/x86/include/asm/xen/hypercall.h
@@ -39,9 +39,11 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pgtable.h>
#include <linux/instrumentation.h>

#include <trace/events/xen.h>

#include <asm/alternative.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>
@@ -86,11 +88,20 @@ struct xen_dm_op_buf;
* there aren't more than 5 arguments...)
*/

extern struct { char _entry[32]; } hypercall_page[];
void xen_hypercall_func(void);
DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);

#define __HYPERCALL "call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x) \
[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
#ifdef MODULE
#define __ADDRESSABLE_xen_hypercall
#else
#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
#endif

#define __HYPERCALL \
__ADDRESSABLE_xen_hypercall \
"call __SCT__xen_hypercall"

#define __HYPERCALL_ENTRY(x) "a" (x)

#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG "eax"
@@ -148,7 +159,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_0ARG(); \
asm volatile (__HYPERCALL \
: __HYPERCALL_0PARAM \
: __HYPERCALL_ENTRY(name) \
: __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER0); \
(type)__res; \
})
@@ -159,7 +170,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_1ARG(a1); \
asm volatile (__HYPERCALL \
: __HYPERCALL_1PARAM \
: __HYPERCALL_ENTRY(name) \
: __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER1); \
(type)__res; \
})
@@ -170,7 +181,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_2ARG(a1, a2); \
asm volatile (__HYPERCALL \
: __HYPERCALL_2PARAM \
: __HYPERCALL_ENTRY(name) \
: __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER2); \
(type)__res; \
})
@@ -181,7 +192,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_3ARG(a1, a2, a3); \
asm volatile (__HYPERCALL \
: __HYPERCALL_3PARAM \
: __HYPERCALL_ENTRY(name) \
: __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER3); \
(type)__res; \
})
@@ -192,7 +203,7 @@ extern struct { char _entry[32]; } hypercall_page[];
__HYPERCALL_4ARG(a1, a2, a3, a4); \
asm volatile (__HYPERCALL \
: __HYPERCALL_4PARAM \
: __HYPERCALL_ENTRY(name) \
: __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \
: __HYPERCALL_CLOBBER4); \
(type)__res; \
})
@@ -206,12 +217,9 @@ xen_single_call(unsigned int call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
return -EINVAL;

asm volatile(CALL_NOSPEC
asm volatile(__HYPERCALL
: __HYPERCALL_5PARAM
: [thunk_target] "a" (&hypercall_page[call])
: __HYPERCALL_ENTRY(call)
: __HYPERCALL_CLOBBER5);

return (long)__res;
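
Net effect on the existing wrappers (an assumed example following the _hypercallN pattern in this header): a call such as

	static inline int HYPERVISOR_sched_op(int cmd, void *arg)
	{
		return _hypercall2(int, sched_op, cmd, arg);
	}

now compiles to roughly "mov $__HYPERVISOR_sched_op, %eax; call __SCT__xen_hypercall" instead of "call hypercall_page + __HYPERVISOR_sched_op * 32", so the hypervisor-provided hypercall page is no longer a call target.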
5 changes: 0 additions & 5 deletions arch/x86/kernel/callthunks.c
@@ -142,11 +142,6 @@ static bool skip_addr(void *dest)
if (dest >= (void *)relocate_kernel &&
dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
return true;
#endif
#ifdef CONFIG_XEN
if (dest >= (void *)hypercall_page &&
dest < (void*)hypercall_page + PAGE_SIZE)
return true;
#endif
return false;
}
(Diffs for the remaining changed files are not shown.)
