Merge branch 'topic/livepatch' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux into for-4.7/livepatching-ppc64le

Pull livepatching support for ppc64 architecture from Michael Ellerman.

Signed-off-by: Jiri Kosina <jkosina@suse.cz>

commit 4d4fb97a62
25 changed files with 783 additions and 145 deletions

arch/powerpc/Kconfig
@@ -94,6 +94,7 @@ config PPC
 	select OF_RESERVED_MEM
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select SYSCTL_EXCEPTION_TRACE
@@ -158,6 +159,7 @@ config PPC
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select HAVE_ARCH_SECCOMP_FILTER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
+	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -373,6 +375,24 @@ config PPC_TRANSACTIONAL_MEM
 	---help---
 	  Support user-mode Transactional Memory on POWERPC.
 
+config DISABLE_MPROFILE_KERNEL
+	bool "Disable use of mprofile-kernel for kernel tracing"
+	depends on PPC64 && CPU_LITTLE_ENDIAN
+	default y
+	help
+	  Selecting this option disables use of the mprofile-kernel ABI for
+	  kernel tracing. That will cause options such as live patching
+	  (CONFIG_LIVEPATCH) which depend on CONFIG_DYNAMIC_FTRACE_WITH_REGS to
+	  be disabled also.
+
+	  If you have a toolchain which supports mprofile-kernel, then you can
+	  disable this. Otherwise leave it enabled. If you're not sure, say
+	  "N".
+
+config MPROFILE_KERNEL
+	depends on PPC64 && CPU_LITTLE_ENDIAN
+	def_bool !DISABLE_MPROFILE_KERNEL
+
 config IOMMU_HELPER
 	def_bool PPC64
 
@@ -1087,3 +1107,5 @@ config PPC_LIB_RHEAP
 	bool
 
 source "arch/powerpc/kvm/Kconfig"
+
+source "kernel/livepatch/Kconfig"
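Net effect of the Kconfig changes: with a capable toolchain, MPROFILE_KERNEL defaults on for ppc64le, which unlocks HAVE_DYNAMIC_FTRACE_WITH_REGS and, through it, HAVE_LIVEPATCH. A toy C model of that chain (the names mirror the Kconfig symbols; this is an illustration, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            bool PPC64 = true, CPU_LITTLE_ENDIAN = true;
            bool DISABLE_MPROFILE_KERNEL = false;   /* user answered N */

            /* def_bool !DISABLE_MPROFILE_KERNEL, guarded by its depends */
            bool MPROFILE_KERNEL = PPC64 && CPU_LITTLE_ENDIAN &&
                                   !DISABLE_MPROFILE_KERNEL;
            bool HAVE_DYNAMIC_FTRACE_WITH_REGS = MPROFILE_KERNEL;
            bool HAVE_LIVEPATCH = HAVE_DYNAMIC_FTRACE_WITH_REGS;

            printf("HAVE_LIVEPATCH=%d\n", HAVE_LIVEPATCH);  /* prints 1 */
            return 0;
    }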
arch/powerpc/Makefile

@@ -133,6 +133,21 @@ else
 CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
 endif
 
+ifdef CONFIG_MPROFILE_KERNEL
+ifeq ($(shell $(srctree)/arch/powerpc/scripts/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__),OK)
+CC_FLAGS_FTRACE := -pg -mprofile-kernel
+KBUILD_CPPFLAGS += -DCC_USING_MPROFILE_KERNEL
+else
+# If the user asked for mprofile-kernel but the toolchain doesn't
+# support it, emit a warning and deliberately break the build later
+# with mprofile-kernel-not-supported. We would prefer to make this an
+# error right here, but then the user would never be able to run
+# oldconfig to change their configuration.
+$(warning Compiler does not support mprofile-kernel, set CONFIG_DISABLE_MPROFILE_KERNEL)
+CC_FLAGS_FTRACE := -mprofile-kernel-not-supported
+endif
+endif
+
 CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
 CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
 CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
arch/powerpc/include/asm/code-patching.h

@@ -99,4 +99,25 @@ static inline unsigned long ppc_global_function_entry(void *func)
 #endif
 }
 
+#ifdef CONFIG_PPC64
+/*
+ * Some instruction encodings commonly used in dynamic ftracing
+ * and function live patching.
+ */
+
+/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define R2_STACK_OFFSET 24
+#else
+#define R2_STACK_OFFSET 40
+#endif
+
+#define PPC_INST_LD_TOC	(PPC_INST_LD | ___PPC_RT(__REG_R2) | \
+			 ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+
+/* usually preceded by a mflr r0 */
+#define PPC_INST_STD_LR	(PPC_INST_STD | ___PPC_RS(__REG_R0) | \
+			 ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+#endif /* CONFIG_PPC64 */
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
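For concreteness, PPC_INST_LD_TOC in the ELFv2 case works out to the encoding of ld r2,24(r1). A minimal standalone check, assuming the usual 0xe8000000 opcode for PPC_INST_LD and the 21/16-bit RT/RA field positions from <asm/ppc-opcode.h>:

    #include <stdint.h>
    #include <stdio.h>

    #define PPC_INST_LD 0xe8000000u         /* ld rt,ds(ra), DS-form */
    #define RT(r)       ((uint32_t)(r) << 21)
    #define RA(r)       ((uint32_t)(r) << 16)

    int main(void)
    {
            /* ELFv2: R2_STACK_OFFSET is 24 */
            uint32_t inst = PPC_INST_LD | RT(2) | RA(1) | 24;

            /* "ld r2,24(r1)" assembles to 0xe8410018 */
            printf("PPC_INST_LD_TOC = 0x%08x\n", inst);
            return 0;
    }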
arch/powerpc/include/asm/ftrace.h

@@ -46,6 +46,8 @@
 extern void _mcount(void);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_ADDR ((unsigned long)ftrace_caller)
+# define FTRACE_REGS_ADDR FTRACE_ADDR
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	/* relocation of mcount call site is the same as the address */
@@ -58,6 +60,9 @@ struct dyn_arch_ftrace {
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
 #endif
 
 #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
arch/powerpc/include/asm/livepatch.h (new file, 62 lines)
@@ -0,0 +1,62 @@
+/*
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
+ *
+ * Copyright (C) 2015-2016, SUSE, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _ASM_POWERPC_LIVEPATCH_H
+#define _ASM_POWERPC_LIVEPATCH_H
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
+{
+	return 0;
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+		type, unsigned long loc, unsigned long value)
+{
+	/* This requires infrastructure changes; we need the loadinfos. */
+	return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->nip = ip;
+}
+
+#define klp_get_ftrace_location klp_get_ftrace_location
+static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	/*
+	 * Live patch works only with -mprofile-kernel on PPC. In this case,
+	 * the ftrace location is always within the first 16 bytes.
+	 */
+	return ftrace_location_range(faddr, faddr + 16);
+}
+
+static inline void klp_init_thread_info(struct thread_info *ti)
+{
+	/* + 1 to account for STACK_END_MAGIC */
+	ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+}
+#else
+static void klp_init_thread_info(struct thread_info *ti) { }
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _ASM_POWERPC_LIVEPATCH_H */
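The pointer arithmetic in klp_init_thread_info() is worth unpacking: (unsigned long *)(ti + 1) is the first word past the thread_info structure, and the trailing + 1 steps over the STACK_END_MAGIC word that lives there. A standalone sketch with a dummy thread_info, purely for illustration:

    #include <stdio.h>

    struct thread_info { int cpu; unsigned long *livepatch_sp; };

    static unsigned long stack[32];     /* stand-in for a kernel stack page */

    int main(void)
    {
            struct thread_info *ti = (struct thread_info *)stack;

            /* + 1 struct skips thread_info, + 1 word skips STACK_END_MAGIC */
            ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;

            printf("ti=%p  end of ti=%p  livepatch_sp=%p\n",
                   (void *)ti, (void *)(ti + 1), (void *)ti->livepatch_sp);
            return 0;
    }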
arch/powerpc/include/asm/module.h

@@ -78,10 +78,18 @@ struct mod_arch_specific {
 # endif /* MODULE */
 #endif
 
-bool is_module_trampoline(u32 *insns);
-int module_trampoline_target(struct module *mod, u32 *trampoline,
+int module_trampoline_target(struct module *mod, unsigned long trampoline,
 			     unsigned long *target);
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
+#else
+static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
+{
+	return 0;
+}
+#endif
+
 struct exception_table_entry;
 void sort_ex_table(struct exception_table_entry *start,
 		   struct exception_table_entry *finish);
arch/powerpc/include/asm/sections.h

@@ -22,6 +22,18 @@ static inline int in_kernel_text(unsigned long addr)
 	return 0;
 }
 
+static inline unsigned long kernel_toc_addr(void)
+{
+	/* Defined by the linker, see vmlinux.lds.S */
+	extern unsigned long __toc_start;
+
+	/*
+	 * The TOC register (r2) points 32kB into the TOC, so that 64kB of
+	 * the TOC can be addressed using a single machine instruction.
+	 */
+	return (unsigned long)(&__toc_start) + 0x8000UL;
+}
+
 static inline int overlaps_interrupt_vector_text(unsigned long start,
 				unsigned long end)
 {
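The 0x8000 bias in kernel_toc_addr() exists because D/DS-form displacements are signed 16-bit values spanning -32768..32767; parking r2 32kB into the TOC therefore puts all 64kB within reach of a single instruction. A quick check with a made-up __toc_start address (the address is illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned long toc_start = 0xc000000001000000UL; /* illustrative */
            unsigned long r2 = toc_start + 0x8000UL;        /* kernel_toc_addr() rule */

            /* signed 16-bit offsets from r2 cover the full 64kB TOC */
            printf("reachable: %#lx .. %#lx\n", r2 - 0x8000, r2 + 0x7fff);
            return 0;
    }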
arch/powerpc/include/asm/thread_info.h

@@ -43,7 +43,9 @@ struct thread_info {
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
 	unsigned long	local_flags;		/* private flags for thread */
-
+#ifdef CONFIG_LIVEPATCH
+	unsigned long *livepatch_sp;
+#endif
 	/* low level flags - has atomic operations done on it */
 	unsigned long	flags ____cacheline_aligned_in_smp;
 };
arch/powerpc/kernel/Makefile

@@ -16,14 +16,14 @@ endif
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
-CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
-CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
-CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
-CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
 # do not trace tracer code
-CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
 # timers used by tracing
-CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
 endif
 
 obj-y				:= cputable.o ptrace.o syscalls.o \
arch/powerpc/kernel/asm-offsets.c

@@ -86,6 +86,10 @@ int main(void)
 	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_LIVEPATCH
+	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
+#endif
+
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
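asm-offsets.c is how C struct layout reaches assembly: each DEFINE() emits the offset as an assemble-time constant, so entry_64.S below can say ld r1, TI_livepatch_sp(r12). The idiom in miniature (a hypothetical struct, not the real thread_info):

    #include <stddef.h>
    #include <stdio.h>

    struct ti_mini {
            int cpu;
            unsigned long flags;
            unsigned long *livepatch_sp;
    };

    int main(void)
    {
            /* the real DEFINE() writes this value into a generated asm header */
            printf("TI_livepatch_sp = %zu\n",
                   offsetof(struct ti_mini, livepatch_sp));
            return 0;
    }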
arch/powerpc/kernel/entry_64.S

@@ -20,6 +20,7 @@
 
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/magic.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -1143,8 +1144,12 @@ _GLOBAL(enter_prom)
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
-	blr
+	mflr	r12
+	mtctr	r12
+	mtlr	r0
+	bctr
 
+#ifndef CC_USING_MPROFILE_KERNEL
 _GLOBAL_TOC(ftrace_caller)
 	/* Taken from output of objdump from lib64/glibc */
 	mflr	r3
@@ -1166,8 +1171,213 @@ _GLOBAL(ftrace_graph_stub)
 	ld	r0, 128(r1)
 	mtlr	r0
 	addi	r1, r1, 112
+
+#else /* CC_USING_MPROFILE_KERNEL */
+/*
+ * ftrace_caller() is the function that replaces _mcount() when ftrace is
+ * active.
+ *
+ * We arrive here after a function A calls function B, and we are the trace
+ * function for B. When we enter r1 points to A's stack frame, B has not yet
+ * had a chance to allocate one yet.
+ *
+ * Additionally r2 may point either to the TOC for A, or B, depending on
+ * whether B did a TOC setup sequence before calling us.
+ *
+ * On entry the LR points back to the _mcount() call site, and r0 holds the
+ * saved LR as it was on entry to B, ie. the original return address at the
+ * call site in A.
+ *
+ * Our job is to save the register state into a struct pt_regs (on the stack)
+ * and then arrange for the ftrace function to be called.
+ */
+_GLOBAL(ftrace_caller)
+	/* Save the original return address in A's stack frame */
+	std	r0,LRSAVE(r1)
+
+	/* Create our stack frame + pt_regs */
+	stdu	r1,-SWITCH_FRAME_SIZE(r1)
+
+	/* Save all gprs to pt_regs */
+	SAVE_8GPRS(0,r1)
+	SAVE_8GPRS(8,r1)
+	SAVE_8GPRS(16,r1)
+	SAVE_8GPRS(24,r1)
+
+	/* Load special regs for save below */
+	mfmsr	r8
+	mfctr	r9
+	mfxer	r10
+	mfcr	r11
+
+	/* Get the _mcount() call site out of LR */
+	mflr	r7
+	/* Save it as pt_regs->nip & pt_regs->link */
+	std	r7, _NIP(r1)
+	std	r7, _LINK(r1)
+
+	/* Save callee's TOC in the ABI compliant location */
+	std	r2, 24(r1)
+	ld	r2,PACATOC(r13)	/* get kernel TOC in r2 */
+
+	addis	r3,r2,function_trace_op@toc@ha
+	addi	r3,r3,function_trace_op@toc@l
+	ld	r5,0(r3)
+
+#ifdef CONFIG_LIVEPATCH
+	mr	r14,r7		/* remember old NIP */
+#endif
+	/* Calculate ip from nip-4 into r3 for call below */
+	subi	r3, r7, MCOUNT_INSN_SIZE
+
+	/* Put the original return address in r4 as parent_ip */
+	mr	r4, r0
+
+	/* Save special regs */
+	std	r8, _MSR(r1)
+	std	r9, _CTR(r1)
+	std	r10, _XER(r1)
+	std	r11, _CCR(r1)
+
+	/* Load &pt_regs in r6 for call below */
+	addi	r6, r1, STACK_FRAME_OVERHEAD
+
+	/* ftrace_call(r3, r4, r5, r6) */
+.globl ftrace_call
+ftrace_call:
+	bl	ftrace_stub
+	nop
+
+	/* Load ctr with the possibly modified NIP */
+	ld	r3, _NIP(r1)
+	mtctr	r3
+#ifdef CONFIG_LIVEPATCH
+	cmpd	r14,r3		/* has NIP been altered? */
+#endif
+
+	/* Restore gprs */
+	REST_8GPRS(0,r1)
+	REST_8GPRS(8,r1)
+	REST_8GPRS(16,r1)
+	REST_8GPRS(24,r1)
+
+	/* Restore callee's TOC */
+	ld	r2, 24(r1)
+
+	/* Pop our stack frame */
+	addi	r1, r1, SWITCH_FRAME_SIZE
+
+	/* Restore original LR for return to B */
+	ld	r0, LRSAVE(r1)
+	mtlr	r0
+
+#ifdef CONFIG_LIVEPATCH
+	/* Based on the cmpd above, if the NIP was altered handle livepatch */
+	bne-	livepatch_handler
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	stdu	r1, -112(r1)
+.globl ftrace_graph_call
+ftrace_graph_call:
+	b	ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+	addi	r1, r1, 112
+#endif
+
+	ld	r0,LRSAVE(r1)	/* restore callee's lr at _mcount site */
+	mtlr	r0
+	bctr			/* jump after _mcount site */
+#endif /* CC_USING_MPROFILE_KERNEL */
+
 _GLOBAL(ftrace_stub)
 	blr
+
+#ifdef CONFIG_LIVEPATCH
+	/*
+	 * This function runs in the mcount context, between two functions. As
+	 * such it can only clobber registers which are volatile and used in
+	 * function linkage.
+	 *
+	 * We get here when a function A, calls another function B, but B has
+	 * been live patched with a new function C.
+	 *
+	 * On entry:
+	 *  - we have no stack frame and can not allocate one
+	 *  - LR points back to the original caller (in A)
+	 *  - CTR holds the new NIP in C
+	 *  - r0 & r12 are free
+	 *
+	 * r0 can't be used as the base register for a DS-form load or store, so
+	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+	 */
+livepatch_handler:
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	/* Allocate 3 x 8 bytes */
+	ld	r1, TI_livepatch_sp(r12)
+	addi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Save toc & real LR on livepatch stack */
+	std	r2, -24(r1)
+	mflr	r12
+	std	r12, -16(r1)
+
+	/* Store stack end marker */
+	lis	r12, STACK_END_MAGIC@h
+	ori	r12, r12, STACK_END_MAGIC@l
+	std	r12, -8(r1)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Put ctr in r12 for global entry and branch there */
+	mfctr	r12
+	bctrl
+
+	/*
+	 * Now we are returning from the patched function to the original
+	 * caller A. We are free to use r0 and r12, and we can use r2 until we
+	 * restore it.
+	 */
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	ld	r1, TI_livepatch_sp(r12)
+
+	/* Check stack marker hasn't been trashed */
+	lis	r2,  STACK_END_MAGIC@h
+	ori	r2,  r2, STACK_END_MAGIC@l
+	ld	r12, -8(r1)
+1:	tdne	r12, r2
+	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+
+	/* Restore LR & toc from livepatch stack */
+	ld	r12, -16(r1)
+	mtlr	r12
+	ld	r2, -24(r1)
+
+	/* Pop livepatch stack frame */
+	CURRENT_THREAD_INFO(r12, r0)
+	subi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Return to original caller of live patched function */
+	blr
+#endif
+
+
 #else
 _GLOBAL_TOC(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
@@ -1198,6 +1408,7 @@ _GLOBAL(ftrace_stub)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef CC_USING_MPROFILE_KERNEL
 _GLOBAL(ftrace_graph_caller)
 	/* load r4 with local address */
 	ld	r4, 128(r1)
@@ -1222,6 +1433,56 @@ _GLOBAL(ftrace_graph_caller)
 	addi	r1, r1, 112
 	blr
 
+#else /* CC_USING_MPROFILE_KERNEL */
+_GLOBAL(ftrace_graph_caller)
+	/* with -mprofile-kernel, parameter regs are still alive at _mcount */
+	std	r10, 104(r1)
+	std	r9, 96(r1)
+	std	r8, 88(r1)
+	std	r7, 80(r1)
+	std	r6, 72(r1)
+	std	r5, 64(r1)
+	std	r4, 56(r1)
+	std	r3, 48(r1)
+
+	/* Save callee's TOC in the ABI compliant location */
+	std	r2, 24(r1)
+	ld	r2, PACATOC(r13)	/* get kernel TOC in r2 */
+
+	mfctr	r4		/* ftrace_caller has moved local addr here */
+	std	r4, 40(r1)
+	mflr	r3		/* ftrace_caller has restored LR from stack */
+	subi	r4, r4, MCOUNT_INSN_SIZE
+
+	bl	prepare_ftrace_return
+	nop
+
+	/*
+	 * prepare_ftrace_return gives us the address we divert to.
+	 * Change the LR to this.
+	 */
+	mtlr	r3
+
+	ld	r0, 40(r1)
+	mtctr	r0
+	ld	r10, 104(r1)
+	ld	r9, 96(r1)
+	ld	r8, 88(r1)
+	ld	r7, 80(r1)
+	ld	r6, 72(r1)
+	ld	r5, 64(r1)
+	ld	r4, 56(r1)
+	ld	r3, 48(r1)
+
+	/* Restore callee's TOC */
+	ld	r2, 24(r1)
+
+	addi	r1, r1, 112
+	mflr	r0
+	std	r0, LRSAVE(r1)
+	bctr
+#endif /* CC_USING_MPROFILE_KERNEL */
+
 _GLOBAL(return_to_handler)
 	/* need to save return values */
 	std	r4,  -32(r1)
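One way to picture the 24-byte record that livepatch_handler pushes onto the per-thread livepatch stack before calling the replacement function (the struct and field names are mine, for illustration; the handler itself works with raw -24/-16/-8 offsets):

    #include <stdio.h>

    /* Sketch of the record at livepatch_sp after the handler's prologue. */
    struct klp_stack_record {
            unsigned long saved_toc;  /* caller's r2,         at -24(r1) */
            unsigned long saved_lr;   /* return address in A, at -16(r1) */
            unsigned long end_magic;  /* STACK_END_MAGIC,     at  -8(r1) */
    };

    int main(void)
    {
            printf("record size: %zu bytes\n", sizeof(struct klp_stack_record));
            return 0;   /* 24 on LP64, matching "Allocate 3 x 8 bytes" above */
    }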
arch/powerpc/kernel/ftrace.c

@@ -61,8 +61,11 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
 		return -EFAULT;
 
 	/* Make sure it is what we expect it to be */
-	if (replaced != old)
+	if (replaced != old) {
+		pr_err("%p: replaced (%#x) != old (%#x)",
+		       (void *)ip, replaced, old);
 		return -EINVAL;
+	}
 
 	/* replace the text with the new text */
 	if (patch_instruction((unsigned int *)ip, new))
@@ -106,14 +109,15 @@ static int
 __ftrace_make_nop(struct module *mod,
 		  struct dyn_ftrace *rec, unsigned long addr)
 {
-	unsigned int op;
-	unsigned long entry, ptr;
+	unsigned long entry, ptr, tramp;
 	unsigned long ip = rec->ip;
-	void *tramp;
+	unsigned int op, pop;
 
 	/* read where this goes */
-	if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
+	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+		pr_err("Fetching opcode failed.\n");
 		return -EFAULT;
+	}
 
 	/* Make sure that this is still a 24-bit jump */
 	if (!is_bl_op(op)) {
@@ -122,14 +126,9 @@ __ftrace_make_nop(struct module *mod,
 	}
 
 	/* lets find where the pointer goes */
-	tramp = (void *)find_bl_target(ip, op);
-
-	pr_devel("ip:%lx jumps to %p", ip, tramp);
-
-	if (!is_module_trampoline(tramp)) {
-		pr_err("Not a trampoline\n");
-		return -EINVAL;
-	}
+	tramp = find_bl_target(ip, op);
+
+	pr_devel("ip:%lx jumps to %lx", ip, tramp);
 
 	if (module_trampoline_target(mod, tramp, &ptr)) {
 		pr_err("Failed to get trampoline target\n");
@@ -158,10 +157,42 @@ __ftrace_make_nop(struct module *mod,
 	 *
	 * Use a b +8 to jump over the load.
 	 */
-	op = 0x48000008;	/* b +8 */
+	pop = PPC_INST_BRANCH | 8;	/* b +8 */
+
+	/*
+	 * Check what is in the next instruction. We can see ld r2,40(r1), but
+	 * on first pass after boot we will see mflr r0.
+	 */
+	if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
+		pr_err("Fetching op failed.\n");
+		return -EFAULT;
+	}
+
+	if (op != PPC_INST_LD_TOC) {
+		unsigned int inst;
+
+		if (probe_kernel_read(&inst, (void *)(ip - 4), 4)) {
+			pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+			return -EFAULT;
+		}
+
+		/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+		if (inst != PPC_INST_MFLR && inst != PPC_INST_STD_LR) {
+			pr_err("Unexpected instructions around bl _mcount\n"
+			       "when enabling dynamic ftrace!\t"
+			       "(%08x,bl,%08x)\n", inst, op);
+			return -EINVAL;
+		}
+
+		/* When using -mprofile-kernel there is no load to jump over */
+		pop = PPC_INST_NOP;
+	}
 
-	if (patch_instruction((unsigned int *)ip, op))
+	if (patch_instruction((unsigned int *)ip, pop)) {
+		pr_err("Patching NOP failed.\n");
 		return -EPERM;
+	}
 
 	return 0;
 }
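For reference, the two call-site shapes __ftrace_make_nop() now has to tell apart, reconstructed from the checks above (a sketch; the exact instruction bytes depend on the branch target):

    /*
     * ELFv1 / plain -pg:                  -mprofile-kernel:
     *
     *     mflr  r0                            mflr  r0
     *     std   r0,LRSAVE(r1)                 bl    _mcount
     *     bl    _mcount
     *     ld    r2,40(r1)   <- TOC restore    (no TOC restore follows)
     *
     * So the "NOP" patched over the bl is "b +8" in the first case, to skip
     * the now-dead TOC restore, and a plain nop in the second.
     */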
@ -287,6 +318,39 @@ int ftrace_make_nop(struct module *mod,
|
|||
|
||||
#ifdef CONFIG_MODULES
|
||||
#ifdef CONFIG_PPC64
|
||||
/*
|
||||
* Examine the existing instructions for __ftrace_make_call.
|
||||
* They should effectively be a NOP, and follow formal constraints,
|
||||
* depending on the ABI. Return false if they don't.
|
||||
*/
|
||||
#ifndef CC_USING_MPROFILE_KERNEL
|
||||
static int
|
||||
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
|
||||
{
|
||||
/*
|
||||
* We expect to see:
|
||||
*
|
||||
* b +8
|
||||
* ld r2,XX(r1)
|
||||
*
|
||||
* The load offset is different depending on the ABI. For simplicity
|
||||
* just mask it out when doing the compare.
|
||||
*/
|
||||
if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
#else
|
||||
static int
|
||||
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
|
||||
{
|
||||
/* look for patched "NOP" on ppc64 with -mprofile-kernel */
|
||||
if (op0 != PPC_INST_NOP)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
{
|
||||
|
@ -297,17 +361,9 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
|||
if (probe_kernel_read(op, ip, sizeof(op)))
|
||||
return -EFAULT;
|
||||
|
||||
/*
|
||||
* We expect to see:
|
||||
*
|
||||
* b +8
|
||||
* ld r2,XX(r1)
|
||||
*
|
||||
* The load offset is different depending on the ABI. For simplicity
|
||||
* just mask it out when doing the compare.
|
||||
*/
|
||||
if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
|
||||
pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
|
||||
if (!expected_nop_sequence(ip, op[0], op[1])) {
|
||||
pr_err("Unexpected call sequence at %p: %x %x\n",
|
||||
ip, op[0], op[1]);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -330,7 +386,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
|||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
|
||||
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
|
||||
unsigned long addr)
|
||||
{
|
||||
return ftrace_make_call(rec, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#else /* !CONFIG_PPC64: */
|
||||
static int
|
||||
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
{
|
||||
|
@ -455,20 +520,13 @@ void ftrace_replace_code(int enable)
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the default ftrace_modify_all_code, but without
|
||||
* stop_machine().
|
||||
*/
|
||||
void arch_ftrace_update_code(int command)
|
||||
{
|
||||
if (command & FTRACE_UPDATE_CALLS)
|
||||
ftrace_replace_code(1);
|
||||
else if (command & FTRACE_DISABLE_CALLS)
|
||||
ftrace_replace_code(0);
|
||||
|
||||
if (command & FTRACE_UPDATE_TRACE_FUNC)
|
||||
ftrace_update_ftrace_func(ftrace_trace_function);
|
||||
|
||||
if (command & FTRACE_START_FUNC_RET)
|
||||
ftrace_enable_ftrace_graph_caller();
|
||||
else if (command & FTRACE_STOP_FUNC_RET)
|
||||
ftrace_disable_ftrace_graph_caller();
|
||||
ftrace_modify_all_code(command);
|
||||
}
|
||||
|
||||
int __init ftrace_dyn_arch_init(void)
|
||||
|
|
|
arch/powerpc/kernel/irq.c

@@ -66,6 +66,7 @@
 #include <asm/udbg.h>
 #include <asm/smp.h>
 #include <asm/debug.h>
+#include <asm/livepatch.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -607,10 +608,12 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);
 	}
 }
 
arch/powerpc/kernel/module.c

@@ -47,6 +47,11 @@ int module_finalize(const Elf_Ehdr *hdr,
 		   const Elf_Shdr *sechdrs, struct module *me)
 {
 	const Elf_Shdr *sect;
+	int rc;
+
+	rc = module_finalize_ftrace(me, sechdrs);
+	if (rc)
+		return rc;
 
 	/* Apply feature fixups */
 	sect = find_section(hdr, sechdrs, "__ftr_fixup");
arch/powerpc/kernel/module_32.c

@@ -181,7 +181,7 @@ static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 /* Set up a trampoline in the PLT to bounce us to the distant function */
 static uint32_t do_plt_call(void *location,
 			    Elf32_Addr val,
-			    Elf32_Shdr *sechdrs,
+			    const Elf32_Shdr *sechdrs,
 			    struct module *mod)
 {
 	struct ppc_plt_entry *entry;
@@ -294,11 +294,19 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 			return -ENOEXEC;
 		}
 	}
-#ifdef CONFIG_DYNAMIC_FTRACE
-	module->arch.tramp =
-		do_plt_call(module->core_layout.base,
-			    (unsigned long)ftrace_caller,
-			    sechdrs, module);
-#endif
 
 	return 0;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
+{
+	module->arch.tramp = do_plt_call(module->core_layout.base,
+					 (unsigned long)ftrace_caller,
+					 sechdrs, module);
+	if (!module->arch.tramp)
+		return -ENOENT;
+
+	return 0;
+}
+#endif
arch/powerpc/kernel/module_64.c

@@ -31,6 +31,7 @@
 #include <asm/code-patching.h>
 #include <linux/sort.h>
 #include <asm/setup.h>
+#include <asm/sections.h>
 
 /* FIXME: We don't do .init separately.  To do this, we'd need to have
    a separate r2 value in the init and core section, and stub between
@@ -41,7 +42,6 @@
    --RR.  */
 
 #if defined(_CALL_ELF) && _CALL_ELF == 2
-#define R2_STACK_OFFSET 24
 
 /* An address is simply the address of the function. */
 typedef unsigned long func_desc_t;
@@ -73,7 +73,6 @@ static unsigned int local_entry_offset(const Elf64_Sym *sym)
 	return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
 }
 #else
-#define R2_STACK_OFFSET 40
 
 /* An address is address of the OPD entry, which contains address of fn. */
 typedef struct ppc64_opd_entry func_desc_t;
@@ -96,6 +95,8 @@ static unsigned int local_entry_offset(const Elf64_Sym *sym)
 }
 #endif
 
+#define STUB_MAGIC 0x73747562 /* stub */
+
 /* Like PPC32, we need little trampolines to do > 24-bit jumps (into
    the kernel itself).  But on PPC64, these need to be used for every
    jump, actually, to reset r2 (TOC+0x8000). */
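STUB_MAGIC is simply the ASCII bytes of "stub" packed into a word, which makes stubs easy to spot in a memory dump. A quick standalone self-check (not kernel code):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t magic = 0x73747562;    /* 's','t','u','b' */

            assert(((magic >> 24) & 0xff) == 's');
            assert(((magic >> 16) & 0xff) == 't');
            assert(((magic >>  8) & 0xff) == 'u');
            assert((magic & 0xff) == 'b');
            return 0;
    }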
@@ -105,7 +106,8 @@ struct ppc64_stub_entry
 	 * need 6 instructions on ABIv2 but we always allocate 7 so
 	 * we don't have to modify the trampoline load instruction. */
 	u32 jump[7];
-	u32 unused;
+	/* Used by ftrace to identify stubs */
+	u32 magic;
 	/* Data for the above code */
 	func_desc_t funcdata;
 };
@@ -139,70 +141,39 @@ static u32 ppc64_stub_insns[] = {
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-
-static u32 ppc64_stub_mask[] = {
-	0xffff0000,
-	0xffff0000,
-	0xffffffff,
-	0xffffffff,
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
-	0xffffffff,
-#endif
-	0xffffffff,
-	0xffffffff
-};
-
-bool is_module_trampoline(u32 *p)
-{
-	unsigned int i;
-	u32 insns[ARRAY_SIZE(ppc64_stub_insns)];
-
-	BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask));
-
-	if (probe_kernel_read(insns, p, sizeof(insns)))
-		return -EFAULT;
-
-	for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) {
-		u32 insna = insns[i];
-		u32 insnb = ppc64_stub_insns[i];
-		u32 mask = ppc64_stub_mask[i];
-
-		if ((insna & mask) != (insnb & mask))
-			return false;
-	}
-
-	return true;
-}
-
-int module_trampoline_target(struct module *mod, u32 *trampoline,
+int module_trampoline_target(struct module *mod, unsigned long addr,
 			     unsigned long *target)
 {
-	u32 buf[2];
-	u16 upper, lower;
-	long offset;
-	void *toc_entry;
+	struct ppc64_stub_entry *stub;
+	func_desc_t funcdata;
+	u32 magic;
 
-	if (probe_kernel_read(buf, trampoline, sizeof(buf)))
+	if (!within_module_core(addr, mod)) {
+		pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
 		return -EFAULT;
+	}
 
-	upper = buf[0] & 0xffff;
-	lower = buf[1] & 0xffff;
+	stub = (struct ppc64_stub_entry *)addr;
 
-	/* perform the addis/addi, both signed */
-	offset = ((short)upper << 16) + (short)lower;
+	if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
+		pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
+		return -EFAULT;
+	}
 
-	/*
-	 * Now get the address this trampoline jumps to.  This
-	 * is always 32 bytes into our trampoline stub.
-	 */
-	toc_entry = (void *)mod->arch.toc + offset + 32;
+	if (magic != STUB_MAGIC) {
+		pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
+		return -EFAULT;
+	}
 
-	if (probe_kernel_read(target, toc_entry, sizeof(*target)))
+	if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
+		pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
 		return -EFAULT;
+	}
 
-	return 0;
-}
+	*target = stub_func_addr(funcdata);
 
+	return 0;
+}
 #endif
 
 /* Count how many different 24-bit relocations (different symbol,
@@ -413,7 +384,7 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
 /* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this
    gives the value maximum span in an instruction which uses a signed
    offset) */
-static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
+static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
 {
 	return sechdrs[me->arch.toc_section].sh_addr + 0x8000;
 }
@@ -426,7 +397,7 @@ static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
 #define PPC_HA(v) PPC_HI ((v) + 0x8000)
 
 /* Patch stub to reference function and correct r2 value. */
-static inline int create_stub(Elf64_Shdr *sechdrs,
+static inline int create_stub(const Elf64_Shdr *sechdrs,
 			      struct ppc64_stub_entry *entry,
 			      unsigned long addr,
 			      struct module *me)
@@ -447,12 +418,14 @@ static inline int create_stub(Elf64_Shdr *sechdrs,
 	entry->jump[0] |= PPC_HA(reladdr);
 	entry->jump[1] |= PPC_LO(reladdr);
 	entry->funcdata = func_desc(addr);
+	entry->magic = STUB_MAGIC;
+
 	return 1;
 }
 
 /* Create stub to jump to function described in this OPD/ptr: we need the
    stub to set up the TOC ptr (r2) for the function. */
-static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
+static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
 				   unsigned long addr,
 				   struct module *me)
 {
@@ -476,17 +449,60 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
 	return (unsigned long)&stubs[i];
 }
 
+#ifdef CC_USING_MPROFILE_KERNEL
+static bool is_early_mcount_callsite(u32 *instruction)
+{
+	/*
+	 * Check if this is one of the -mprofile-kernel sequences.
+	 */
+	if (instruction[-1] == PPC_INST_STD_LR &&
+	    instruction[-2] == PPC_INST_MFLR)
+		return true;
+
+	if (instruction[-1] == PPC_INST_MFLR)
+		return true;
+
+	return false;
+}
+
+/*
+ * In case of _mcount calls, do not save the current callee's TOC (in r2) into
+ * the original caller's stack frame. If we did we would clobber the saved TOC
+ * value of the original caller.
+ */
+static void squash_toc_save_inst(const char *name, unsigned long addr)
+{
+	struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr;
+
+	/* Only for calls to _mcount */
+	if (strcmp("_mcount", name) != 0)
+		return;
+
+	stub->jump[2] = PPC_INST_NOP;
+}
+#else
+static void squash_toc_save_inst(const char *name, unsigned long addr) { }
+
+/* without -mprofile-kernel, mcount calls are never early */
+static bool is_early_mcount_callsite(u32 *instruction)
+{
+	return false;
+}
+#endif
+
 /* We expect a noop next: if it is, replace it with instruction to
    restore r2. */
 static int restore_r2(u32 *instruction, struct module *me)
 {
 	if (*instruction != PPC_INST_NOP) {
+		if (is_early_mcount_callsite(instruction - 1))
+			return 1;
 		pr_err("%s: Expect noop after relocate, got %08x\n",
 		       me->name, *instruction);
 		return 0;
 	}
 	/* ld r2,R2_STACK_OFFSET(r1) */
-	*instruction = 0xe8410000 | R2_STACK_OFFSET;
+	*instruction = PPC_INST_LD_TOC;
 	return 1;
 }
 
@@ -611,6 +627,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 					return -ENOENT;
 				if (!restore_r2((u32 *)location + 1, me))
 					return -ENOEXEC;
+
+				squash_toc_save_inst(strtab + sym->st_name, value);
 			} else
 				value += local_entry_offset(sym);
 
@@ -693,12 +711,84 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 		}
 	}
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-	me->arch.toc = my_r2(sechdrs, me);
-	me->arch.tramp = stub_for_addr(sechdrs,
-				       (unsigned long)ftrace_caller,
-				       me);
-#endif
-
 	return 0;
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#ifdef CC_USING_MPROFILE_KERNEL
+
+#define PACATOC offsetof(struct paca_struct, kernel_toc)
+
+/*
+ * For mprofile-kernel we use a special stub for ftrace_caller() because we
+ * can't rely on r2 containing this module's TOC when we enter the stub.
+ *
+ * That can happen if the function calling us didn't need to use the toc. In
+ * that case it won't have set up r2, and the r2 value will be either the
+ * kernel's toc, or possibly another module's toc.
+ *
+ * To deal with that this stub uses the kernel toc, which is always accessible
+ * via the paca (in r13). The target (ftrace_caller()) is responsible for
+ * saving and restoring the toc before returning.
+ */
+static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
+{
+	struct ppc64_stub_entry *entry;
+	unsigned int i, num_stubs;
+	static u32 stub_insns[] = {
+		0xe98d0000 | PACATOC,	/* ld      r12,PACATOC(r13)	*/
+		0x3d8c0000,		/* addis   r12,r12,<high>	*/
+		0x398c0000,		/* addi    r12,r12,<low>	*/
+		0x7d8903a6,		/* mtctr   r12			*/
+		0x4e800420,		/* bctr				*/
+	};
+	long reladdr;
+
+	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry);
+
+	/* Find the next available stub entry */
+	entry = (void *)sechdrs[me->arch.stubs_section].sh_addr;
+	for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++);
+
+	if (i >= num_stubs) {
+		pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name);
+		return 0;
+	}
+
+	memcpy(entry->jump, stub_insns, sizeof(stub_insns));
+
+	/* Stub uses address relative to kernel toc (from the paca) */
+	reladdr = (unsigned long)ftrace_caller - kernel_toc_addr();
+	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+		pr_err("%s: Address of ftrace_caller out of range of kernel_toc.\n", me->name);
+		return 0;
+	}
+
+	entry->jump[1] |= PPC_HA(reladdr);
+	entry->jump[2] |= PPC_LO(reladdr);
+
+	/* Even though we don't use funcdata in the stub, it's needed elsewhere. */
+	entry->funcdata = func_desc((unsigned long)ftrace_caller);
+	entry->magic = STUB_MAGIC;
+
+	return (unsigned long)entry;
+}
+#else
+static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs, struct module *me)
+{
+	return stub_for_addr(sechdrs, (unsigned long)ftrace_caller, me);
+}
+#endif
+
+int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
+{
+	mod->arch.toc = my_r2(sechdrs, mod);
+	mod->arch.tramp = create_ftrace_stub(sechdrs, mod);
+
+	if (!mod->arch.tramp)
+		return -ENOENT;
+
+	return 0;
+}
+#endif
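The addis/addi pair in the stub reconstructs reladdr from the two halves patched in above. PPC_HA adds 0x8000 before taking the high half to pre-compensate for addi sign-extending its immediate. A quick self-check (the macros are copied from the hunk above; PPC_HI is assumed to be the usual shift-and-mask):

    #include <assert.h>
    #include <stdint.h>

    #define PPC_HI(v) (((v) >> 16) & 0xffff)
    #define PPC_LO(v) ((v) & 0xffff)
    #define PPC_HA(v) PPC_HI((v) + 0x8000)

    int main(void)
    {
            uint32_t reladdr = 0xedcb7000u; /* illustrative negative offset */

            /* addis r12,r12,HA then addi r12,r12,LO: LO is sign-extended */
            uint32_t rebuilt = (PPC_HA(reladdr) << 16) +
                               (uint32_t)(int16_t)PPC_LO(reladdr);

            assert(rebuilt == reladdr);
            return 0;
    }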
arch/powerpc/kernel/paca.c

@@ -17,10 +17,6 @@
 #include <asm/pgtable.h>
 #include <asm/kexec.h>
 
-/* This symbol is provided by the linker - let it fill in the paca
- * field correctly */
-extern unsigned long __toc_start;
-
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
@@ -149,11 +145,6 @@ EXPORT_SYMBOL(paca);
 
 void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 {
-	/* The TOC register (GPR2) points 32kB into the TOC, so that 64kB
-	 * of the TOC can be addressed using a single machine instruction.
-	 */
-	unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL;
-
 #ifdef CONFIG_PPC_BOOK3S
 	new_paca->lppaca_ptr = new_lppaca(cpu);
 #else
@@ -161,7 +152,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 #endif
 	new_paca->lock_token = 0x8000;
 	new_paca->paca_index = cpu;
-	new_paca->kernel_toc = kernel_toc;
+	new_paca->kernel_toc = kernel_toc_addr();
 	new_paca->kernelbase = (unsigned long) _stext;
 	/* Only set MSR:IR/DR when MMU is initialized */
 	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
arch/powerpc/kernel/process.c

@@ -55,6 +55,8 @@
 #include <asm/firmware.h>
 #endif
 #include <asm/code-patching.h>
+#include <asm/livepatch.h>
+
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
@@ -1267,13 +1269,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	extern void ret_from_kernel_thread(void);
 	void (*f)(void);
 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+	struct thread_info *ti = task_thread_info(p);
+
+	klp_init_thread_info(ti);
 
 	/* Copy registers */
 	sp -= sizeof(struct pt_regs);
 	childregs = (struct pt_regs *) sp;
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		/* kernel thread */
-		struct thread_info *ti = (void *)task_stack_page(p);
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
 		/* function */
arch/powerpc/kernel/setup_64.c

@@ -69,6 +69,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/hugetlb.h>
 #include <asm/epapr_hcalls.h>
+#include <asm/livepatch.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -667,16 +668,16 @@ static void __init emergency_stack_init(void)
 	limit = min(safe_stack_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
-		unsigned long sp;
-		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].emergency_sp = __va(sp);
+		struct thread_info *ti;
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for machine check exception handling. */
-		sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].mc_emergency_sp = __va(sp);
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
 }
@@ -700,6 +701,8 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.panic)
 		setup_panic();
 
+	klp_init_thread_info(&init_thread_info);
+
 	init_mm.start_code = (unsigned long)_stext;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;
arch/powerpc/lib/Makefile

@@ -6,8 +6,8 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
 
 ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)
 
-CFLAGS_REMOVE_code-patching.o = -pg
-CFLAGS_REMOVE_feature-fixups.o = -pg
+CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
 
 obj-y += string.o alloc.o crtsavres.o ppc_ksyms.o code-patching.o \
 	 feature-fixups.o
arch/powerpc/platforms/powermac/Makefile

@@ -2,7 +2,7 @@ CFLAGS_bootx_init.o += -fPIC
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
-CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
 endif
 
 obj-y += pic.o setup.o time.o feature.o pci.o \
arch/powerpc/scripts/gcc-check-mprofile-kernel.sh (new executable file, 23 lines)

@@ -0,0 +1,23 @@
+#!/bin/bash
+
+set -e
+set -o pipefail
+
+# To debug, uncomment the following line
+# set -x
+
+# Test whether the compile option -mprofile-kernel exists and generates
+# profiling code (ie. a call to _mcount()).
+echo "int func() { return 0; }" | \
+    $* -S -x c -O2 -p -mprofile-kernel - -o - 2> /dev/null | \
+    grep -q "_mcount"
+
+# Test whether the notrace attribute correctly suppresses calls to _mcount().
+
+echo -e "#include <linux/compiler.h>\nnotrace int func() { return 0; }" | \
+    $* -S -x c -O2 -p -mprofile-kernel - -o - 2> /dev/null | \
+    grep -q "_mcount" && \
+    exit 1
+
+echo "OK"
+exit 0
include/linux/ftrace.h

@@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
 void ftrace_run_stop_machine(int command);
 unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_location_range(unsigned long start, unsigned long end);
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
 
kernel/livepatch/core.c

@@ -334,6 +334,19 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	rcu_read_unlock();
 }
 
+/*
+ * Convert a function address into the appropriate ftrace location.
+ *
+ * Usually this is just the address of the function, but on some architectures
+ * it's more complicated so allow them to provide a custom behaviour.
+ */
+#ifndef klp_get_ftrace_location
+static unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	return faddr;
+}
+#endif
+
 static void klp_disable_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
@@ -348,8 +361,14 @@ static void klp_disable_func(struct klp_func *func)
 		return;
 
 	if (list_is_singular(&ops->func_stack)) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (WARN_ON(!ftrace_loc))
+			return;
+
 		WARN_ON(unregister_ftrace_function(&ops->fops));
-		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
 
 		list_del_rcu(&func->stack_node);
 		list_del(&ops->node);
@@ -374,6 +393,15 @@ static int klp_enable_func(struct klp_func *func)
 
 	ops = klp_find_ops(func->old_addr);
 	if (!ops) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (!ftrace_loc) {
+			pr_err("failed to find location for function '%s'\n",
+				func->old_name);
+			return -EINVAL;
+		}
+
 		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 		if (!ops)
 			return -ENOMEM;
@@ -388,7 +416,7 @@ static int klp_enable_func(struct klp_func *func)
 		INIT_LIST_HEAD(&ops->func_stack);
 		list_add_rcu(&func->stack_node, &ops->func_stack);
 
-		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
 		if (ret) {
 			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
 			       func->old_name, ret);
@@ -399,7 +427,7 @@ static int klp_enable_func(struct klp_func *func)
 		if (ret) {
 			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
 			       func->old_name, ret);
-			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
 			goto err;
 		}
 
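The #ifndef klp_get_ftrace_location guard pairs with the #define klp_get_ftrace_location klp_get_ftrace_location in the powerpc header earlier in this series: defining the name to itself is the usual kernel trick that lets the generic fallback compile out. The pattern in isolation (toy values, not kernel code):

    #include <stdio.h>

    /* "arch header": define the symbol to itself to signal an override */
    #define klp_get_ftrace_location klp_get_ftrace_location
    static unsigned long klp_get_ftrace_location(unsigned long faddr)
    {
            return faddr + 8;   /* stand-in for an arch-specific rule */
    }

    /* "generic code": fallback compiled only if no override was seen */
    #ifndef klp_get_ftrace_location
    static unsigned long klp_get_ftrace_location(unsigned long faddr)
    {
            return faddr;
    }
    #endif

    int main(void)
    {
            printf("%lu\n", klp_get_ftrace_location(100)); /* prints 108 */
            return 0;
    }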
kernel/trace/ftrace.c

@@ -1533,7 +1533,19 @@ static int ftrace_cmp_recs(const void *a, const void *b)
 	return 0;
 }
 
-static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+/**
+ * ftrace_location_range - return the first address of a traced location
+ *	if it touches the given ip range
+ * @start: start of range to search.
+ * @end: end of range to search (inclusive). @end points to the last byte
+ *	to check.
+ *
+ * Returns rec->ip if the related ftrace location is at least partly within
+ * the given address range. That is, the first address of the instruction
+ * that is either a NOP or call to the function tracer. It checks the ftrace
+ * internal tables to determine if the address belongs or not.
+ */
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
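On powerpc, the klp_get_ftrace_location() shown earlier calls this as ftrace_location_range(faddr, faddr + 16), asking whether any ftrace site starts within the first 16 bytes of the function. A toy model of the inclusive overlap test, with one fake record and MCOUNT_INSN_SIZE assumed to be 4:

    #include <stdio.h>

    #define MCOUNT_INSN_SIZE 4
    static unsigned long rec_ip = 0x1000 + 8;   /* one fake ftrace record */

    static unsigned long toy_location_range(unsigned long start,
                                            unsigned long end)
    {
            /* matches if [rec_ip, rec_ip + size) touches [start, end] */
            if (rec_ip <= end && rec_ip + MCOUNT_INSN_SIZE - 1 >= start)
                    return rec_ip;
            return 0;
    }

    int main(void)
    {
            printf("%#lx\n", toy_location_range(0x1000, 0x1000 + 16));
            return 0;   /* prints 0x1008 */
    }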