perf: Remove the nmi parameter from the swevent and overflow interface

The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.

For the various event classes:

  - hardware: nmi=1; the PMI is in fact an NMI or we run irq_work_run from
    the PMI-tail (ARM etc.)
  - tracepoint: nmi=1; since a tracepoint could fire from NMI context.
  - software: nmi=[0,1]; some, like the schedule thing cannot
    perform wakeups, and hence need 0.

As one can see, there is very little nmi=0 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).

The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Peter Zijlstra 2011-06-27 14:41:57 +02:00 committed by Ingo Molnar
parent 1880c4ae18
commit a8b0ca17b8
46 changed files with 119 additions and 141 deletions

View file

@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
data.period = event->hw.last_period; data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) { if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, 1, &data, regs)) { if (perf_event_overflow(event, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the /* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while. * counter, i.e., disable it for a little while.
*/ */

View file

@ -479,7 +479,7 @@ armv6pmu_handle_irq(int irq_num,
if (!armpmu_event_set_period(event, hwc, idx)) if (!armpmu_event_set_period(event, hwc, idx))
continue; continue;
if (perf_event_overflow(event, 0, &data, regs)) if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx); armpmu->disable(hwc, idx);
} }

View file

@ -787,7 +787,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
if (!armpmu_event_set_period(event, hwc, idx)) if (!armpmu_event_set_period(event, hwc, idx))
continue; continue;
if (perf_event_overflow(event, 0, &data, regs)) if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx); armpmu->disable(hwc, idx);
} }

View file

@ -251,7 +251,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
if (!armpmu_event_set_period(event, hwc, idx)) if (!armpmu_event_set_period(event, hwc, idx))
continue; continue;
if (perf_event_overflow(event, 0, &data, regs)) if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx); armpmu->disable(hwc, idx);
} }
@ -583,7 +583,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
if (!armpmu_event_set_period(event, hwc, idx)) if (!armpmu_event_set_period(event, hwc, idx))
continue; continue;
if (perf_event_overflow(event, 0, &data, regs)) if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx); armpmu->disable(hwc, idx);
} }

View file

@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
/* /*
* Handle hitting a HW-breakpoint. * Handle hitting a HW-breakpoint.
*/ */
static void ptrace_hbptriggered(struct perf_event *bp, int unused, static void ptrace_hbptriggered(struct perf_event *bp,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {

View file

@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
unsigned int address, destreg, data, type; unsigned int address, destreg, data, type;
unsigned int res = 0; unsigned int res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
if (current->pid != previous_pid) { if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",

View file

@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
fault = __do_page_fault(mm, addr, fsr, tsk); fault = __do_page_fault(mm, addr, fsr, tsk);
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
if (fault & VM_FAULT_MAJOR) if (fault & VM_FAULT_MAJOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
else if (fault & VM_FAULT_MINOR) else if (fault & VM_FAULT_MINOR)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
/* /*
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR

View file

@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
if (!mipspmu_event_set_period(event, hwc, idx)) if (!mipspmu_event_set_period(event, hwc, idx))
return; return;
if (perf_event_overflow(event, 0, data, regs)) if (perf_event_overflow(event, data, regs))
mipspmu->disable_event(idx); mipspmu->disable_event(idx);
} }

View file

@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{ {
if ((opcode & OPCODE) == LL) { if ((opcode & OPCODE) == LL) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0); 1, regs, 0);
return simulate_ll(regs, opcode); return simulate_ll(regs, opcode);
} }
if ((opcode & OPCODE) == SC) { if ((opcode & OPCODE) == SC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0); 1, regs, 0);
return simulate_sc(regs, opcode); return simulate_sc(regs, opcode);
} }
@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
int rd = (opcode & RD) >> 11; int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16; int rt = (opcode & RT) >> 16;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0); 1, regs, 0);
switch (rd) { switch (rd) {
case 0: /* CPU number */ case 0: /* CPU number */
regs->regs[rt] = smp_processor_id(); regs->regs[rt] = smp_processor_id();
@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{ {
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0); 1, regs, 0);
return 0; return 0;
} }

View file

@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
unsigned long value; unsigned long value;
unsigned int res; unsigned int res;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
1, 0, regs, 0);
/* /*
* This load never faults. * This load never faults.
@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
mm_segment_t seg; mm_segment_t seg;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, 0, regs, regs->cp0_badvaddr); 1, regs, regs->cp0_badvaddr);
/* /*
* Did we catch a fault trying to load an instruction? * Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode? * Or are we running in MIPS16 mode?

View file

@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
} }
emul: emul:
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
1, 0, xcp, 0);
MIPS_FPU_EMU_INC_STATS(emulated); MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) { switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{ case ldc1_op:{

View file

@ -145,7 +145,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
* the fault. * the fault.
*/ */
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) { if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) if (fault & VM_FAULT_OOM)
goto out_of_memory; goto out_of_memory;
@ -154,12 +154,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
BUG(); BUG();
} }
if (fault & VM_FAULT_MAJOR) { if (fault & VM_FAULT_MAJOR) {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
1, 0, regs, address);
tsk->maj_flt++; tsk->maj_flt++;
} else { } else {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
1, 0, regs, address);
tsk->min_flt++; tsk->min_flt++;
} }

View file

@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
#define PPC_WARN_EMULATED(type, regs) \ #define PPC_WARN_EMULATED(type, regs) \
do { \ do { \
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
1, 0, regs, 0); \ 1, regs, 0); \
__PPC_WARN_EMULATED(type); \ __PPC_WARN_EMULATED(type); \
} while (0) } while (0)
#define PPC_WARN_ALIGNMENT(type, regs) \ #define PPC_WARN_ALIGNMENT(type, regs) \
do { \ do { \
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
1, 0, regs, regs->dar); \ 1, regs, regs->dar); \
__PPC_WARN_EMULATED(type); \ __PPC_WARN_EMULATED(type); \
} while (0) } while (0)

View file

@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
* here so there is no possibility of being interrupted. * here so there is no possibility of being interrupted.
*/ */
static void record_and_restart(struct perf_event *event, unsigned long val, static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi) struct pt_regs *regs)
{ {
u64 period = event->hw.sample_period; u64 period = event->hw.sample_period;
s64 prev, delta, left; s64 prev, delta, left;
@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR) if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr); perf_get_data_addr(regs, &data.addr);
if (perf_event_overflow(event, nmi, &data, regs)) if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0); power_pmu_stop(event, 0);
} }
} }
@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if ((int)val < 0) { if ((int)val < 0) {
/* event has overflowed */ /* event has overflowed */
found = 1; found = 1;
record_and_restart(event, val, regs, nmi); record_and_restart(event, val, regs);
} }
} }

View file

@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
* here so there is no possibility of being interrupted. * here so there is no possibility of being interrupted.
*/ */
static void record_and_restart(struct perf_event *event, unsigned long val, static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs, int nmi) struct pt_regs *regs)
{ {
u64 period = event->hw.sample_period; u64 period = event->hw.sample_period;
s64 prev, delta, left; s64 prev, delta, left;
@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, 0); perf_sample_data_init(&data, 0);
data.period = event->hw.last_period; data.period = event->hw.last_period;
if (perf_event_overflow(event, nmi, &data, regs)) if (perf_event_overflow(event, &data, regs))
fsl_emb_pmu_stop(event, 0); fsl_emb_pmu_stop(event, 0);
} }
} }
@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
if (event) { if (event) {
/* event has overflowed */ /* event has overflowed */
found = 1; found = 1;
record_and_restart(event, val, regs, nmi); record_and_restart(event, val, regs);
} else { } else {
/* /*
* Disabled counter is negative, * Disabled counter is negative,

View file

@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
} }
#ifdef CONFIG_HAVE_HW_BREAKPOINT #ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp, int nmi, void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs) struct perf_sample_data *data, struct pt_regs *regs)
{ {
struct perf_event_attr attr; struct perf_event_attr attr;

View file

@ -173,7 +173,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV); die("Weird page fault", regs, SIGSEGV);
} }
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* When running in the kernel we expect faults to occur only to /* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the * addresses in user space. All other faults represent errors in the
@ -319,7 +319,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
} }
if (ret & VM_FAULT_MAJOR) { if (ret & VM_FAULT_MAJOR) {
current->maj_flt++; current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address); regs, address);
#ifdef CONFIG_PPC_SMLPAR #ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) { if (firmware_has_feature(FW_FEATURE_CMO)) {
@ -330,7 +330,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
#endif #endif
} else { } else {
current->min_flt++; current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address); regs, address);
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);

View file

@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
goto out; goto out;
address = trans_exc_code & __FAIL_ADDR_MASK; address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_ALLOW_RETRY; flags = FAULT_FLAG_ALLOW_RETRY;
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
flags |= FAULT_FLAG_WRITE; flags |= FAULT_FLAG_WRITE;
@ -345,11 +345,11 @@ static inline int do_exception(struct pt_regs *regs, int access,
if (flags & FAULT_FLAG_ALLOW_RETRY) { if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) { if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++; tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address); regs, address);
} else { } else {
tsk->min_flt++; tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address); regs, address);
} }
if (fault & VM_FAULT_RETRY) { if (fault & VM_FAULT_RETRY) {

View file

@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
return 0; return 0;
} }
void ptrace_triggered(struct perf_event *bp, int nmi, void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs) struct perf_sample_data *data, struct pt_regs *regs)
{ {
struct perf_event_attr attr; struct perf_event_attr attr;

View file

@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
*/ */
if (!expected) { if (!expected) {
unaligned_fixups_notify(current, instruction, regs); unaligned_fixups_notify(current, instruction, regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
regs, address); regs, address);
} }

View file

@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
return error; return error;
} }
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f; destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) { if (user_mode(regs)) {
@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
return error; return error;
} }
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f; srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) { if (user_mode(regs)) {
@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
return error; return error;
} }
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f; destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) { if (user_mode(regs)) {
@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
return error; return error;
} }
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f; srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) { if (user_mode(regs)) {

View file

@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
struct task_struct *tsk = current; struct task_struct *tsk = current;
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu); struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) { if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
/* initialize once. */ /* initialize once. */

View file

@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
if ((regs->sr & SR_IMASK) != SR_IMASK) if ((regs->sr & SR_IMASK) != SR_IMASK)
local_irq_enable(); local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* /*
* If we're in an interrupt, have no user context or are running * If we're in an interrupt, have no user context or are running
@ -210,11 +210,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
} }
if (fault & VM_FAULT_MAJOR) { if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++; tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address); regs, address);
} else { } else {
tsk->min_flt++; tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address); regs, address);
} }

View file

@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
/* Not an IO address, so reenable interrupts */ /* Not an IO address, so reenable interrupts */
local_irq_enable(); local_irq_enable();
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* /*
* If we're in an interrupt or have no user * If we're in an interrupt or have no user
@ -200,11 +200,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
if (fault & VM_FAULT_MAJOR) { if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++; tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address); regs, address);
} else { } else {
tsk->min_flt++; tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address); regs, address);
} }

View file

@ -1277,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
if (!sparc_perf_event_set_period(event, hwc, idx)) if (!sparc_perf_event_set_period(event, hwc, idx))
continue; continue;
if (perf_event_overflow(event, 1, &data, regs)) if (perf_event_overflow(event, &data, regs))
sparc_pmu_stop(event, 0); sparc_pmu_stop(event, 0);
} }

View file

@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
unsigned long addr = compute_effective_address(regs, insn); unsigned long addr = compute_effective_address(regs, insn);
int err; int err;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (dir) { switch (dir) {
case load: case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
} }
addr = compute_effective_address(regs, insn); addr = compute_effective_address(regs, insn);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch(dir) { switch(dir) {
case load: case load:
err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),

View file

@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
addr = compute_effective_address(regs, insn, addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f)); ((insn >> 25) & 0x1f));
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
switch (asi) { switch (asi) {
case ASI_NL: case ASI_NL:
case ASI_AIUPL: case ASI_AIUPL:
@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
int ret, i, rd = ((insn >> 25) & 0x1f); int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (insn & 0x2000) { if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel); maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn); value = sign_extend_imm13(insn);
@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
int asi = decode_asi(insn, regs); int asi = decode_asi(insn, regs);
int flag = (freg < 32) ? FPRS_DL : FPRS_DU; int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
save_and_clear_fpu(); save_and_clear_fpu();
current_thread_info()->xfsr[0] &= ~0x1c000; current_thread_info()->xfsr[0] &= ~0x1c000;
@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
unsigned long *reg; unsigned long *reg;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
maybe_flush_windows(0, 0, rd, from_kernel); maybe_flush_windows(0, 0, rd, from_kernel);
reg = fetch_reg_addr(rd, regs); reg = fetch_reg_addr(rd, regs);
@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV) if (tstate & TSTATE_PRIV)
die_if_kernel("lddfmna from kernel", regs); die_if_kernel("lddfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT)) if (test_thread_flag(TIF_32BIT))
pc = (u32)pc; pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
if (tstate & TSTATE_PRIV) if (tstate & TSTATE_PRIV)
die_if_kernel("stdfmna from kernel", regs); die_if_kernel("stdfmna from kernel", regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
if (test_thread_flag(TIF_32BIT)) if (test_thread_flag(TIF_32BIT))
pc = (u32)pc; pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if (get_user(insn, (u32 __user *) pc) != -EFAULT) {

View file

@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
BUG_ON(regs->tstate & TSTATE_PRIV); BUG_ON(regs->tstate & TSTATE_PRIV);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (test_thread_flag(TIF_32BIT)) if (test_thread_flag(TIF_32BIT))
pc = (u32)pc; pc = (u32)pc;

View file

@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
int retcode = 0; /* assume all succeed */ int retcode = 0; /* assume all succeed */
unsigned long insn; unsigned long insn;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
#ifdef DEBUG_MATHEMU #ifdef DEBUG_MATHEMU
printk("In do_mathemu()... pc is %08lx\n", regs->pc); printk("In do_mathemu()... pc is %08lx\n", regs->pc);

View file

@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
if (tstate & TSTATE_PRIV) if (tstate & TSTATE_PRIV)
die_if_kernel("unfinished/unimplemented FPop from kernel", regs); die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
if (test_thread_flag(TIF_32BIT)) if (test_thread_flag(TIF_32BIT))
pc = (u32)pc; pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if (get_user(insn, (u32 __user *) pc) != -EFAULT) {

View file

@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
if (in_atomic() || !mm) if (in_atomic() || !mm)
goto no_context; goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
@ -301,12 +301,10 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
} }
if (fault & VM_FAULT_MAJOR) { if (fault & VM_FAULT_MAJOR) {
current->maj_flt++; current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
regs, address);
} else { } else {
current->min_flt++; current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
regs, address);
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return; return;

View file

@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
if (in_atomic() || !mm) if (in_atomic() || !mm)
goto intr_or_no_mm; goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (!down_read_trylock(&mm->mmap_sem)) { if (!down_read_trylock(&mm->mmap_sem)) {
if ((regs->tstate & TSTATE_PRIV) && if ((regs->tstate & TSTATE_PRIV) &&
@ -433,12 +433,10 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
} }
if (fault & VM_FAULT_MAJOR) { if (fault & VM_FAULT_MAJOR) {
current->maj_flt++; current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
regs, address);
} else { } else {
current->min_flt++; current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
regs, address);
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);

View file

@ -1339,7 +1339,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
if (!x86_perf_event_set_period(event)) if (!x86_perf_event_set_period(event))
continue; continue;
if (perf_event_overflow(event, 1, &data, regs)) if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
} }

View file

@ -1003,7 +1003,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
data.period = event->hw.last_period; data.period = event->hw.last_period;
if (perf_event_overflow(event, 1, &data, regs)) if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
} }

View file

@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void)
*/ */
perf_prepare_sample(&header, &data, event, &regs); perf_prepare_sample(&header, &data, event, &regs);
if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1)) if (perf_output_begin(&handle, event, header.size * (top - at), 1))
return 1; return 1;
for (; at < top; at++) { for (; at < top; at++) {
@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
else else
regs.flags &= ~PERF_EFLAGS_EXACT; regs.flags &= ~PERF_EFLAGS_EXACT;
if (perf_event_overflow(event, 1, &data, &regs)) if (perf_event_overflow(event, &data, &regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
} }

View file

@ -970,7 +970,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
if (!x86_perf_event_set_period(event)) if (!x86_perf_event_set_period(event))
continue; continue;
if (perf_event_overflow(event, 1, &data, regs)) if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0); x86_pmu_stop(event, 0);
} }

View file

@ -608,7 +608,7 @@ int kgdb_arch_init(void)
return register_die_notifier(&kgdb_notifier); return register_die_notifier(&kgdb_notifier);
} }
static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi, static void kgdb_hw_overflow_handler(struct perf_event *event,
struct perf_sample_data *data, struct pt_regs *regs) struct perf_sample_data *data, struct pt_regs *regs)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;

View file

@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target,
return ret; return ret;
} }
static void ptrace_triggered(struct perf_event *bp, int nmi, static void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {

View file

@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD)) if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address); pgtable_bad(regs, error_code, address);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/* /*
* If we're in an interrupt, have no user context or are running * If we're in an interrupt, have no user context or are running
@ -1161,11 +1161,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (flags & FAULT_FLAG_ALLOW_RETRY) { if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) { if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++; tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address); regs, address);
} else { } else {
tsk->min_flt++; tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address); regs, address);
} }
if (fault & VM_FAULT_RETRY) { if (fault & VM_FAULT_RETRY) {

View file

@ -682,7 +682,7 @@ enum perf_event_active_state {
struct file; struct file;
struct perf_sample_data; struct perf_sample_data;
typedef void (*perf_overflow_handler_t)(struct perf_event *, int, typedef void (*perf_overflow_handler_t)(struct perf_event *,
struct perf_sample_data *, struct perf_sample_data *,
struct pt_regs *regs); struct pt_regs *regs);
@ -925,7 +925,6 @@ struct perf_output_handle {
unsigned long size; unsigned long size;
void *addr; void *addr;
int page; int page;
int nmi;
int sample; int sample;
}; };
@ -993,7 +992,7 @@ extern void perf_prepare_sample(struct perf_event_header *header,
struct perf_event *event, struct perf_event *event,
struct pt_regs *regs); struct pt_regs *regs);
extern int perf_event_overflow(struct perf_event *event, int nmi, extern int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs); struct pt_regs *regs);
@ -1012,7 +1011,7 @@ static inline int is_software_event(struct perf_event *event)
extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
#ifndef perf_arch_fetch_caller_regs #ifndef perf_arch_fetch_caller_regs
static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
@ -1034,7 +1033,7 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
} }
static __always_inline void static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{ {
struct pt_regs hot_regs; struct pt_regs hot_regs;
@ -1043,7 +1042,7 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
perf_fetch_caller_regs(&hot_regs); perf_fetch_caller_regs(&hot_regs);
regs = &hot_regs; regs = &hot_regs;
} }
__perf_sw_event(event_id, nr, nmi, regs, addr); __perf_sw_event(event_id, nr, regs, addr);
} }
} }
@ -1057,7 +1056,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task)
static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{ {
perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0); perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
__perf_event_task_sched_out(task, next); __perf_event_task_sched_out(task, next);
} }
@ -1119,7 +1118,7 @@ extern void perf_bp_event(struct perf_event *event, void *data);
extern int perf_output_begin(struct perf_output_handle *handle, extern int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size, struct perf_event *event, unsigned int size,
int nmi, int sample); int sample);
extern void perf_output_end(struct perf_output_handle *handle); extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle, extern void perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len); const void *buf, unsigned int len);
@ -1143,8 +1142,7 @@ static inline int perf_event_task_disable(void) { return -EINVAL; }
static inline int perf_event_task_enable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; }
static inline void static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
struct pt_regs *regs, u64 addr) { }
static inline void static inline void
perf_bp_event(struct perf_event *event, void *data) { } perf_bp_event(struct perf_event *event, void *data) { }

View file

@ -3972,7 +3972,7 @@ void perf_prepare_sample(struct perf_event_header *header,
} }
} }
static void perf_event_output(struct perf_event *event, int nmi, static void perf_event_output(struct perf_event *event,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {
@ -3984,7 +3984,7 @@ static void perf_event_output(struct perf_event *event, int nmi,
perf_prepare_sample(&header, data, event, regs); perf_prepare_sample(&header, data, event, regs);
if (perf_output_begin(&handle, event, header.size, nmi, 1)) if (perf_output_begin(&handle, event, header.size, 1))
goto exit; goto exit;
perf_output_sample(&handle, &header, data, event); perf_output_sample(&handle, &header, data, event);
@ -4024,7 +4024,7 @@ perf_event_read_event(struct perf_event *event,
int ret; int ret;
perf_event_header__init_id(&read_event.header, &sample, event); perf_event_header__init_id(&read_event.header, &sample, event);
ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0); ret = perf_output_begin(&handle, event, read_event.header.size, 0);
if (ret) if (ret)
return; return;
@ -4067,7 +4067,7 @@ static void perf_event_task_output(struct perf_event *event,
perf_event_header__init_id(&task_event->event_id.header, &sample, event); perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event, ret = perf_output_begin(&handle, event,
task_event->event_id.header.size, 0, 0); task_event->event_id.header.size, 0);
if (ret) if (ret)
goto out; goto out;
@ -4204,7 +4204,7 @@ static void perf_event_comm_output(struct perf_event *event,
perf_event_header__init_id(&comm_event->event_id.header, &sample, event); perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event, ret = perf_output_begin(&handle, event,
comm_event->event_id.header.size, 0, 0); comm_event->event_id.header.size, 0);
if (ret) if (ret)
goto out; goto out;
@ -4351,7 +4351,7 @@ static void perf_event_mmap_output(struct perf_event *event,
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event, ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size, 0, 0); mmap_event->event_id.header.size, 0);
if (ret) if (ret)
goto out; goto out;
@ -4546,7 +4546,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
perf_event_header__init_id(&throttle_event.header, &sample, event); perf_event_header__init_id(&throttle_event.header, &sample, event);
ret = perf_output_begin(&handle, event, ret = perf_output_begin(&handle, event,
throttle_event.header.size, 1, 0); throttle_event.header.size, 0);
if (ret) if (ret)
return; return;
@ -4559,7 +4559,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
* Generic event overflow handling, sampling. * Generic event overflow handling, sampling.
*/ */
static int __perf_event_overflow(struct perf_event *event, int nmi, static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data, int throttle, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {
@ -4602,34 +4602,28 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
if (events && atomic_dec_and_test(&event->event_limit)) { if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1; ret = 1;
event->pending_kill = POLL_HUP; event->pending_kill = POLL_HUP;
if (nmi) { event->pending_disable = 1;
event->pending_disable = 1; irq_work_queue(&event->pending);
irq_work_queue(&event->pending);
} else
perf_event_disable(event);
} }
if (event->overflow_handler) if (event->overflow_handler)
event->overflow_handler(event, nmi, data, regs); event->overflow_handler(event, data, regs);
else else
perf_event_output(event, nmi, data, regs); perf_event_output(event, data, regs);
if (event->fasync && event->pending_kill) { if (event->fasync && event->pending_kill) {
if (nmi) { event->pending_wakeup = 1;
event->pending_wakeup = 1; irq_work_queue(&event->pending);
irq_work_queue(&event->pending);
} else
perf_event_wakeup(event);
} }
return ret; return ret;
} }
int perf_event_overflow(struct perf_event *event, int nmi, int perf_event_overflow(struct perf_event *event,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {
return __perf_event_overflow(event, nmi, 1, data, regs); return __perf_event_overflow(event, 1, data, regs);
} }
/* /*
@ -4678,7 +4672,7 @@ static u64 perf_swevent_set_period(struct perf_event *event)
} }
static void perf_swevent_overflow(struct perf_event *event, u64 overflow, static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
int nmi, struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
@ -4692,7 +4686,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
return; return;
for (; overflow; overflow--) { for (; overflow; overflow--) {
if (__perf_event_overflow(event, nmi, throttle, if (__perf_event_overflow(event, throttle,
data, regs)) { data, regs)) {
/* /*
* We inhibit the overflow from happening when * We inhibit the overflow from happening when
@ -4705,7 +4699,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
} }
static void perf_swevent_event(struct perf_event *event, u64 nr, static void perf_swevent_event(struct perf_event *event, u64 nr,
int nmi, struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
@ -4719,12 +4713,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
return; return;
if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
return perf_swevent_overflow(event, 1, nmi, data, regs); return perf_swevent_overflow(event, 1, data, regs);
if (local64_add_negative(nr, &hwc->period_left)) if (local64_add_negative(nr, &hwc->period_left))
return; return;
perf_swevent_overflow(event, 0, nmi, data, regs); perf_swevent_overflow(event, 0, data, regs);
} }
static int perf_exclude_event(struct perf_event *event, static int perf_exclude_event(struct perf_event *event,
@ -4812,7 +4806,7 @@ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
} }
static void do_perf_sw_event(enum perf_type_id type, u32 event_id, static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
u64 nr, int nmi, u64 nr,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {
@ -4828,7 +4822,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
hlist_for_each_entry_rcu(event, node, head, hlist_entry) { hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs)) if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_event(event, nr, nmi, data, regs); perf_swevent_event(event, nr, data, regs);
} }
end: end:
rcu_read_unlock(); rcu_read_unlock();
@ -4849,8 +4843,7 @@ inline void perf_swevent_put_recursion_context(int rctx)
put_recursion_context(swhash->recursion, rctx); put_recursion_context(swhash->recursion, rctx);
} }
void __perf_sw_event(u32 event_id, u64 nr, int nmi, void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
struct pt_regs *regs, u64 addr)
{ {
struct perf_sample_data data; struct perf_sample_data data;
int rctx; int rctx;
@ -4862,7 +4855,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
perf_sample_data_init(&data, addr); perf_sample_data_init(&data, addr);
do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs); do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
perf_swevent_put_recursion_context(rctx); perf_swevent_put_recursion_context(rctx);
preempt_enable_notrace(); preempt_enable_notrace();
@ -5110,7 +5103,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
hlist_for_each_entry_rcu(event, node, head, hlist_entry) { hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs)) if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, 1, &data, regs); perf_swevent_event(event, count, &data, regs);
} }
perf_swevent_put_recursion_context(rctx); perf_swevent_put_recursion_context(rctx);
@ -5203,7 +5196,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
perf_sample_data_init(&sample, bp->attr.bp_addr); perf_sample_data_init(&sample, bp->attr.bp_addr);
if (!bp->hw.state && !perf_exclude_event(bp, regs)) if (!bp->hw.state && !perf_exclude_event(bp, regs))
perf_swevent_event(bp, 1, 1, &sample, regs); perf_swevent_event(bp, 1, &sample, regs);
} }
#endif #endif
@ -5232,7 +5225,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
if (regs && !perf_exclude_event(event, regs)) { if (regs && !perf_exclude_event(event, regs)) {
if (!(event->attr.exclude_idle && current->pid == 0)) if (!(event->attr.exclude_idle && current->pid == 0))
if (perf_event_overflow(event, 0, &data, regs)) if (perf_event_overflow(event, &data, regs))
ret = HRTIMER_NORESTART; ret = HRTIMER_NORESTART;
} }

View file

@ -27,7 +27,6 @@ struct ring_buffer {
void *data_pages[0]; void *data_pages[0];
}; };
extern void rb_free(struct ring_buffer *rb); extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer * extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags); rb_alloc(int nr_pages, long watermark, int cpu, int flags);

View file

@ -38,11 +38,8 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
{ {
atomic_set(&handle->rb->poll, POLL_IN); atomic_set(&handle->rb->poll, POLL_IN);
if (handle->nmi) { handle->event->pending_wakeup = 1;
handle->event->pending_wakeup = 1; irq_work_queue(&handle->event->pending);
irq_work_queue(&handle->event->pending);
} else
perf_event_wakeup(handle->event);
} }
/* /*
@ -102,7 +99,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
int perf_output_begin(struct perf_output_handle *handle, int perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size, struct perf_event *event, unsigned int size,
int nmi, int sample) int sample)
{ {
struct ring_buffer *rb; struct ring_buffer *rb;
unsigned long tail, offset, head; unsigned long tail, offset, head;
@ -127,7 +124,6 @@ int perf_output_begin(struct perf_output_handle *handle,
handle->rb = rb; handle->rb = rb;
handle->event = event; handle->event = event;
handle->nmi = nmi;
handle->sample = sample; handle->sample = sample;
if (!rb->nr_pages) if (!rb->nr_pages)

View file

@ -2220,7 +2220,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (task_cpu(p) != new_cpu) { if (task_cpu(p) != new_cpu) {
p->se.nr_migrations++; p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0); perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
} }
__set_task_cpu(p, new_cpu); __set_task_cpu(p, new_cpu);

View file

@ -211,7 +211,7 @@ static struct perf_event_attr wd_hw_attr = {
}; };
/* Callback function for perf event subsystem */ /* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event, int nmi, static void watchdog_overflow_callback(struct perf_event *event,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {

View file

@ -41,7 +41,7 @@ module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any" MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
" write operations on the kernel symbol"); " write operations on the kernel symbol");
static void sample_hbp_handler(struct perf_event *bp, int nmi, static void sample_hbp_handler(struct perf_event *bp,
struct perf_sample_data *data, struct perf_sample_data *data,
struct pt_regs *regs) struct pt_regs *regs)
{ {