Automatic merge of 'next-test' into merge-test (2024-10-22 11:14)
Commit 9e17418 by mpe, Oct 22, 2024 (2 parents: 79f22a4 + bb86adc)
Showing 52 changed files with 2,477 additions and 379 deletions.
6 changes: 6 additions & 0 deletions arch/Kconfig
@@ -1682,4 +1682,10 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT
config ARCH_NEED_CMPXCHG_1_EMU
bool

config ARCH_WANTS_PRE_LINK_VMLINUX
bool
help
An architecture can select this if it provides arch/<arch>/tools/Makefile
with .arch.vmlinux.o target to be linked into vmlinux.

endmenu
2 changes: 1 addition & 1 deletion arch/powerpc/Kbuild
@@ -19,4 +19,4 @@ obj-$(CONFIG_KEXEC_CORE) += kexec/
obj-$(CONFIG_KEXEC_FILE) += purgatory/

# for cleaning
subdir- += boot
subdir- += boot tools
22 changes: 21 additions & 1 deletion arch/powerpc/Kconfig
@@ -234,6 +234,8 @@ config PPC
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32
select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS if PPC_FTRACE_OUT_OF_LINE || (PPC32 && ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS if HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS
select HAVE_DYNAMIC_FTRACE_WITH_REGS if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -243,7 +245,7 @@ config PPC
select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if PPC64 || (PPC32 && CC_IS_GCC)
select HAVE_FUNCTION_TRACER if !COMPILE_TEST && (PPC64 || (PPC32 && CC_IS_GCC))
select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC
select HAVE_GENERIC_VDSO
select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP
@@ -273,6 +275,8 @@ config PPC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
select HAVE_SAMPLE_FTRACE_DIRECT if HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_SETUP_PER_CPU_AREA if PPC64
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
@@ -569,6 +573,22 @@ config ARCH_USING_PATCHABLE_FUNCTION_ENTRY
def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh $(CC) -mlittle-endian) if PPC64 && CPU_LITTLE_ENDIAN
def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-fpatchable-function-entry.sh $(CC) -mbig-endian) if PPC64 && CPU_BIG_ENDIAN

config PPC_FTRACE_OUT_OF_LINE
def_bool PPC64 && ARCH_USING_PATCHABLE_FUNCTION_ENTRY
select ARCH_WANTS_PRE_LINK_VMLINUX

config PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE
int "Number of ftrace out-of-line stubs to reserve within .text"
depends on PPC_FTRACE_OUT_OF_LINE
default 32768
help
Number of stubs to reserve for use by ftrace. This space is
reserved within .text, and is distinct from any additional space
added at the end of .text before the final vmlinux link. Set to
zero to have stubs only be generated at the end of vmlinux (only
if the size of vmlinux is less than 32MB). Set to a higher value
if building vmlinux larger than 48MB.

config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && (PPC_PSERIES || \
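As a rough sense of scale for the reservation described in the help text above: the struct ftrace_ool_stub added by this series (see the arch/powerpc/include/asm/ftrace.h hunk further down) is four instructions plus, with CALL_OPS, one pointer per stub. A small standalone C sketch of the arithmetic, mirroring that layout as an assumption rather than reusing the kernel header:

/*
 * Editor's sketch, not part of the commit: estimate the .text space
 * consumed by PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE, using struct layouts
 * copied by hand from the ftrace.h hunk in this diff.
 */
#include <stdio.h>
#include <stdint.h>

struct ool_stub_call_ops {              /* DYNAMIC_FTRACE_WITH_CALL_OPS=y */
	uint64_t ftrace_op;             /* struct ftrace_ops * on 64-bit */
	uint32_t insn[4];
} __attribute__((aligned(sizeof(unsigned long))));

struct ool_stub_plain {                 /* CALL_OPS disabled */
	uint32_t insn[4];
} __attribute__((aligned(sizeof(unsigned long))));

int main(void)
{
	unsigned long nr = 32768;       /* the Kconfig default above */

	printf("with call-ops: %zu bytes/stub, %lu KiB reserved\n",
	       sizeof(struct ool_stub_call_ops),
	       nr * sizeof(struct ool_stub_call_ops) / 1024);
	printf("without:       %zu bytes/stub, %lu KiB reserved\n",
	       sizeof(struct ool_stub_plain),
	       nr * sizeof(struct ool_stub_plain) / 1024);
	return 0;
}

On a 64-bit build that works out to 24 or 16 bytes per stub, so the default of 32768 stubs costs roughly 768 KiB or 512 KiB of .text.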
8 changes: 8 additions & 0 deletions arch/powerpc/Makefile
@@ -155,7 +155,15 @@ CC_FLAGS_NO_FPU := $(call cc-option,-msoft-float)
ifdef CONFIG_FUNCTION_TRACER
ifdef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
CC_FLAGS_FTRACE := -fpatchable-function-entry=1
else
ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS # PPC32 only
CC_FLAGS_FTRACE := -fpatchable-function-entry=3,1
else
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
endif
else
CC_FLAGS_FTRACE := -pg
ifdef CONFIG_MPROFILE_KERNEL
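For readers unfamiliar with the flag being chosen above: -fpatchable-function-entry=N[,M] tells GCC and clang to emit N nops per function, M of them before the entry symbol and the remaining N-M after it, and the kernel later rewrites those nops to hook functions into ftrace. A standalone way to see the three variants selected here (illustrative only, not part of the kernel build):

/*
 * Editor's sketch: save as patchable_demo.c and inspect the assembly.
 *
 *   gcc -O2 -S -fpatchable-function-entry=2   patchable_demo.c  # 2 nops after demo:
 *   gcc -O2 -S -fpatchable-function-entry=3,1 patchable_demo.c  # 1 nop before, 2 after
 *   gcc -O2 -S -fpatchable-function-entry=1   patchable_demo.c  # 1 nop after
 *
 * With PPC_FTRACE_OUT_OF_LINE a single in-function nop is enough
 * because the remaining instructions live in an out-of-line stub.
 */
int demo(int x)
{
	return x + 1;
}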
8 changes: 8 additions & 0 deletions arch/powerpc/Makefile.postlink
@@ -24,6 +24,9 @@ else
$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"
endif

quiet_cmd_ftrace_check = CHKFTRC $@
cmd_ftrace_check = $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/ftrace_check.sh "$(NM)" "$@"

# `@true` prevents complaint when there is nothing to be done

vmlinux: FORCE
@@ -34,6 +37,11 @@ endif
ifdef CONFIG_RELOCATABLE
$(call if_changed,relocs_check)
endif
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_PPC64_ELF_ABI_V1
$(call cmd,ftrace_check)
endif
endif

clean:
rm -f .tmp_symbols.txt
7 changes: 7 additions & 0 deletions arch/powerpc/include/asm/fadump.h
@@ -34,4 +34,11 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
int depth, void *data);
extern int fadump_reserve_mem(void);
#endif

#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA)
void fadump_cma_init(void);
#else
static inline void fadump_cma_init(void) { }
#endif

#endif /* _ASM_POWERPC_FADUMP_H */
33 changes: 32 additions & 1 deletion arch/powerpc/include/asm/ftrace.h
@@ -24,7 +24,10 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
struct module;
struct dyn_ftrace;
struct dyn_arch_ftrace {
struct module *mod;
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
/* pointer to the associated out-of-line stub */
unsigned long ool_stub;
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -131,8 +134,36 @@ static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }

#ifdef CONFIG_FUNCTION_TRACER
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
struct ftrace_ool_stub {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
struct ftrace_ops *ftrace_op;
#endif
u32 insn[4];
} __aligned(sizeof(unsigned long));
extern struct ftrace_ool_stub ftrace_ool_stub_text_end[], ftrace_ool_stub_text[],
ftrace_ool_stub_inittext[];
extern unsigned int ftrace_ool_stub_text_end_count, ftrace_ool_stub_text_count,
ftrace_ool_stub_inittext_count;
#endif
void ftrace_free_init_tramp(void);
unsigned long ftrace_call_adjust(unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* When an ftrace registered caller is tracing a function that is also set by a
* register_ftrace_direct() call, it needs to be differentiated in the
* ftrace_caller trampoline so that the direct call can be invoked after the
* other ftrace ops. To do this, place the direct caller in the orig_gpr3 field
* of pt_regs. This tells ftrace_caller that there's a direct caller.
*/
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
struct pt_regs *regs = &fregs->regs;

regs->orig_gpr3 = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
#else
static inline void ftrace_free_init_tramp(void) { }
static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
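To see how the arch_ftrace_set_direct_caller() path above gets exercised, here is a heavily trimmed sketch of a module attaching a direct trampoline, modelled on the kernel's ftrace-direct samples (which this series enables on powerpc via HAVE_SAMPLE_FTRACE_DIRECT). The my_tramp and my_direct_func names are illustrative, and the register-preserving trampoline itself is arch-specific assembly omitted here, so treat this as a sketch rather than a buildable module:

/* Editor's sketch, assuming the generic register_ftrace_direct() API. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/sched.h>

void my_direct_func(struct task_struct *p)
{
	trace_printk("waking up %s-%d\n", p->comm, p->pid);
}

extern void my_tramp(void);	/* arch-specific asm trampoline, not shown */

static struct ftrace_ops direct;

static int __init direct_sketch_init(void)
{
	/* Route wake_up_process() through my_tramp via a direct call. */
	ftrace_set_filter_ip(&direct, (unsigned long)wake_up_process, 0, 0);
	return register_ftrace_direct(&direct, (unsigned long)my_tramp);
}

static void __exit direct_sketch_exit(void)
{
	unregister_ftrace_direct(&direct, (unsigned long)my_tramp, true);
}

module_init(direct_sketch_init);
module_exit(direct_sketch_exit);
MODULE_LICENSE("GPL");

When such an ops is attached, ftrace_caller finds the direct address in orig_gpr3 (per the comment above) and branches to the trampoline instead of returning straight to the traced function.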
8 changes: 6 additions & 2 deletions arch/powerpc/include/asm/kfence.h
@@ -15,7 +15,7 @@
#define ARCH_FUNC_PREFIX "."
#endif

#ifdef CONFIG_KFENCE
extern bool kfence_early_init;
extern bool kfence_disabled;

static inline void disable_kfence(void)
@@ -27,7 +27,11 @@ static inline bool arch_kfence_init_pool(void)
{
return !kfence_disabled;
}
#endif

static inline bool kfence_early_init_enabled(void)
{
return IS_ENABLED(CONFIG_KFENCE) && kfence_early_init;
}

#ifdef CONFIG_PPC64
static inline bool kfence_protect_page(unsigned long addr, bool protect)
5 changes: 5 additions & 0 deletions arch/powerpc/include/asm/module.h
@@ -47,6 +47,11 @@ struct mod_arch_specific {
#ifdef CONFIG_DYNAMIC_FTRACE
unsigned long tramp;
unsigned long tramp_regs;
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
struct ftrace_ool_stub *ool_stubs;
unsigned int ool_stub_count;
unsigned int ool_stub_index;
#endif
#endif
};

14 changes: 14 additions & 0 deletions arch/powerpc/include/asm/ppc-opcode.h
@@ -587,12 +587,26 @@
#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
#define PPC_RAW_EIEIO() (0x7c0006ac)

/* bcl 20,31,$+4 */
#define PPC_RAW_BCL4() (0x429f0005)
#define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset))
#define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset))
#define PPC_RAW_TW(t0, a, b) (0x7c000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_TRAP() PPC_RAW_TW(31, 0, 0)
#define PPC_RAW_SETB(t, bfa) (0x7c000100 | ___PPC_RT(t) | ___PPC_RA((bfa) << 2))

#ifdef CONFIG_PPC32
#define PPC_RAW_STL PPC_RAW_STW
#define PPC_RAW_STLU PPC_RAW_STWU
#define PPC_RAW_LL PPC_RAW_LWZ
#define PPC_RAW_CMPLI PPC_RAW_CMPWI
#else
#define PPC_RAW_STL PPC_RAW_STD
#define PPC_RAW_STLU PPC_RAW_STDU
#define PPC_RAW_LL PPC_RAW_LD
#define PPC_RAW_CMPLI PPC_RAW_CMPDI
#endif

/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT)
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/vdso.h
@@ -25,6 +25,7 @@ int vdso_getcpu_init(void);
#ifdef __VDSO64__
#define V_FUNCTION_BEGIN(name) \
.globl name; \
.type name,@function; \
name: \

#define V_FUNCTION_END(name) \
16 changes: 14 additions & 2 deletions arch/powerpc/include/asm/vdso/getrandom.h
@@ -7,6 +7,8 @@

#ifndef __ASSEMBLY__

#include <asm/vdso_datapage.h>

static __always_inline int do_syscall_3(const unsigned long _r0, const unsigned long _r3,
const unsigned long _r4, const unsigned long _r5)
{
@@ -43,11 +45,21 @@ static __always_inline ssize_t getrandom_syscall(void *buffer, size_t len, unsig

static __always_inline struct vdso_rng_data *__arch_get_vdso_rng_data(void)
{
return NULL;
struct vdso_arch_data *data;

asm (
" bcl 20, 31, .+4 ;"
"0: mflr %0 ;"
" addis %0, %0, (_vdso_datapage - 0b)@ha ;"
" addi %0, %0, (_vdso_datapage - 0b)@l ;"
: "=r" (data) : : "lr"
);

return &data->rng_data;
}

ssize_t __c_kernel_getrandom(void *buffer, size_t len, unsigned int flags, void *opaque_state,
size_t opaque_len, const struct vdso_rng_data *vd);
size_t opaque_len);

#endif /* !__ASSEMBLY__ */

24 changes: 7 additions & 17 deletions arch/powerpc/include/asm/vdso_datapage.h
@@ -82,8 +82,9 @@ struct vdso_arch_data {
__u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
__u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */

struct vdso_data data[CS_BASES];
struct vdso_rng_data rng_data;

struct vdso_data data[CS_BASES] __aligned(1 << CONFIG_PAGE_SHIFT);
};

#else /* CONFIG_PPC64 */
@@ -95,8 +96,9 @@ struct vdso_arch_data {
__u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */
__u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
__u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
struct vdso_data data[CS_BASES];
struct vdso_rng_data rng_data;

struct vdso_data data[CS_BASES] __aligned(1 << CONFIG_PAGE_SHIFT);
};

#endif /* CONFIG_PPC64 */
@@ -105,29 +107,17 @@ extern struct vdso_arch_data *vdso_data;

#else /* __ASSEMBLY__ */

.macro get_datapage ptr
.macro get_datapage ptr offset=0
bcl 20, 31, .+4
999:
mflr \ptr
addis \ptr, \ptr, (_vdso_datapage - 999b)@ha
addi \ptr, \ptr, (_vdso_datapage - 999b)@l
addis \ptr, \ptr, (_vdso_datapage - 999b + \offset)@ha
addi \ptr, \ptr, (_vdso_datapage - 999b + \offset)@l
.endm

#include <asm/asm-offsets.h>
#include <asm/page.h>

.macro get_realdatapage ptr scratch
get_datapage \ptr
#ifdef CONFIG_TIME_NS
lwz \scratch, VDSO_CLOCKMODE_OFFSET(\ptr)
xoris \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@h
xori \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@l
cntlzw \scratch, \scratch
rlwinm \scratch, \scratch, PAGE_SHIFT - 5, 1 << PAGE_SHIFT
add \ptr, \ptr, \scratch
#endif
.endm

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
12 changes: 11 additions & 1 deletion arch/powerpc/kernel/asm-offsets.c
@@ -335,7 +335,6 @@ int main(void)

/* datapage offsets for use by vdso */
OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data);
OFFSET(VDSO_RNG_DATA_OFFSET, vdso_arch_data, rng_data);
OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec);
#ifdef CONFIG_PPC64
OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size);
@@ -677,5 +676,16 @@ int main(void)
DEFINE(BPT_SIZE, BPT_SIZE);
#endif

#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
DEFINE(FTRACE_OOL_STUB_SIZE, sizeof(struct ftrace_ool_stub));
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
OFFSET(FTRACE_OPS_FUNC, ftrace_ops, func);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
OFFSET(FTRACE_OPS_DIRECT_CALL, ftrace_ops, direct_call);
#endif
#endif

return 0;
}
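The new DEFINE()/OFFSET() entries above exist so the ftrace assembly can refer to these sizes and offsets by name rather than hard-coding them. A self-contained sketch of the kbuild asm-offsets trick they rely on (simplified from include/linux/kbuild.h; the exact marker format is an assumption, and demo_ops is a stand-in rather than the real struct ftrace_ops):

/*
 * Editor's sketch: build with `gcc -S offsets_demo.c` and grep "->" in
 * offsets_demo.s. Each OFFSET()/DEFINE() leaves a marker line in the
 * assembly output that a kbuild script turns into #define constants
 * usable from .S files.
 */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

struct demo_ops {			/* illustrative layout only */
	void (*func)(void);
	void (*direct_call)(void);
};

int main(void)
{
	OFFSET(DEMO_OPS_FUNC, demo_ops, func);
	OFFSET(DEMO_OPS_DIRECT_CALL, demo_ops, direct_call);
	return 0;
}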