Merge branch 'perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into trace/tip/tracing/core-6
Conflicts:
	include/trace/ftrace.h
	kernel/trace/trace_kprobe.c

Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

commit f0218b3e99
382 changed files with 16274 additions and 8836 deletions
@@ -107,10 +107,6 @@ void (*dev_config) (struct ata_port *, struct ata_device *);
 	issue of SET FEATURES - XFER MODE, and prior to operation.
 	</para>
-	<para>
-	Called by ata_device_add() after ata_dev_identify() determines
-	a device is present.
-	</para>
 	<para>
 	This entry may be specified as NULL in ata_port_operations.
 	</para>

@@ -154,8 +150,8 @@ unsigned int (*mode_filter) (struct ata_port *, struct ata_device *, unsigned in
 
 	<sect2><title>Taskfile read/write</title>
 	<programlisting>
-void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
-void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
+void (*sff_tf_load) (struct ata_port *ap, struct ata_taskfile *tf);
+void (*sff_tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
 	</programlisting>
 
 	<para>

@@ -164,36 +160,35 @@ void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
 	hardware registers / DMA buffers, to obtain the current set of
 	taskfile register values.
 	Most drivers for taskfile-based hardware (PIO or MMIO) use
-	ata_tf_load() and ata_tf_read() for these hooks.
+	ata_sff_tf_load() and ata_sff_tf_read() for these hooks.
 	</para>
 
 	</sect2>
 
 	<sect2><title>PIO data read/write</title>
 	<programlisting>
-void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
+void (*sff_data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
 	</programlisting>
 
 	<para>
 	All bmdma-style drivers must implement this hook.  This is the low-level
 	operation that actually copies the data bytes during a PIO data
 	transfer.
-	Typically the driver
-	will choose one of ata_pio_data_xfer_noirq(), ata_pio_data_xfer(), or
-	ata_mmio_data_xfer().
+	Typically the driver will choose one of ata_sff_data_xfer_noirq(),
+	ata_sff_data_xfer(), or ata_sff_data_xfer32().
 	</para>
 
 	</sect2>
 
 	<sect2><title>ATA command execute</title>
 	<programlisting>
-void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
+void (*sff_exec_command)(struct ata_port *ap, struct ata_taskfile *tf);
 	</programlisting>
 
 	<para>
 	causes an ATA command, previously loaded with
 	->tf_load(), to be initiated in hardware.
-	Most drivers for taskfile-based hardware use ata_exec_command()
+	Most drivers for taskfile-based hardware use ata_sff_exec_command()
 	for this hook.
 	</para>

@@ -218,8 +213,8 @@ command.
 
 	<sect2><title>Read specific ATA shadow registers</title>
 	<programlisting>
-u8   (*check_status)(struct ata_port *ap);
-u8   (*check_altstatus)(struct ata_port *ap);
+u8   (*sff_check_status)(struct ata_port *ap);
+u8   (*sff_check_altstatus)(struct ata_port *ap);
 	</programlisting>
 
 	<para>

@@ -227,20 +222,14 @@ u8   (*check_altstatus)(struct ata_port *ap);
 	hardware.  On some hardware, reading the Status register has
 	the side effect of clearing the interrupt condition.
 	Most drivers for taskfile-based hardware use
-	ata_check_status() for this hook.
-	</para>
-	<para>
-	Note that because this is called from ata_device_add(), at
-	least a dummy function that clears device interrupts must be
-	provided for all drivers, even if the controller doesn't
-	actually have a taskfile status register.
+	ata_sff_check_status() for this hook.
 	</para>
 
 	</sect2>
 
 	<sect2><title>Select ATA device on bus</title>
 	<programlisting>
-void (*dev_select)(struct ata_port *ap, unsigned int device);
+void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
 	</programlisting>
 
 	<para>

@@ -251,9 +240,7 @@ void (*dev_select)(struct ata_port *ap, unsigned int device);
 	</para>
 	<para>
 	Most drivers for taskfile-based hardware use
-	ata_std_dev_select() for this hook.  Controllers which do not
-	support second drives on a port (such as SATA contollers) will
-	use ata_noop_dev_select().
+	ata_sff_dev_select() for this hook.
 	</para>
 
 	</sect2>

@@ -441,13 +428,13 @@ void (*irq_clear) (struct ata_port *);
 	to struct ata_host_set.
 	</para>
 	<para>
-	Most legacy IDE drivers use ata_interrupt() for the
+	Most legacy IDE drivers use ata_sff_interrupt() for the
 	irq_handler hook, which scans all ports in the host_set,
 	determines which queued command was active (if any), and calls
-	ata_host_intr(ap,qc).
+	ata_sff_host_intr(ap,qc).
 	</para>
 	<para>
-	Most legacy IDE drivers use ata_bmdma_irq_clear() for the
+	Most legacy IDE drivers use ata_sff_irq_clear() for the
 	irq_clear() hook, which simply clears the interrupt and error
 	flags in the DMA status register.
 	</para>

@@ -496,10 +483,6 @@ void (*host_stop) (struct ata_host_set *host_set);
 	data from port at this time.
 	</para>
-	<para>
-	Many drivers use ata_port_stop() as this hook, which frees the
-	PRD table.
-	</para>
 	<para>
 	->host_stop() is called after all ->port_stop() calls
 	have completed.  The hook must finalize hardware shutdown, release DMA
 	and other resources, etc.
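The renamed hooks above all live in struct ata_port_operations. As a rough illustration only (hypothetical driver; it assumes the post-rename SFF helpers named in this section), a driver would wire them up along these lines:

	#include <linux/libata.h>

	/* Sketch: most SFF drivers inherit the stock operations and
	 * override only what their hardware needs.  .inherits pulls in
	 * ata_sff_tf_load(), ata_sff_exec_command(),
	 * ata_sff_check_status(), ata_sff_dev_select(), etc.
	 */
	static struct ata_port_operations my_sff_ops = {
		.inherits	= &ata_sff_port_ops,
		.sff_data_xfer	= ata_sff_data_xfer32,	/* 32-bit PIO variant */
	};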
@@ -74,6 +74,11 @@ structure at all. You should use this to keep device-specific data.
 	/* retrieve the value */
 	void *i2c_get_clientdata(const struct i2c_client *client);
 
+Note that starting with kernel 2.6.34, you don't have to set the `data' field
+to NULL in remove() or if probe() failed anymore. The i2c-core does this
+automatically on these occasions. Those are also the only times the core will
+touch this field.
+
 
 Accessing the client
 ====================
 
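A minimal sketch of the clientdata pattern the added note describes (driver name and data type are hypothetical; i2c_set_clientdata()/i2c_get_clientdata() are the documented calls):

	#include <linux/i2c.h>
	#include <linux/slab.h>

	struct my_data { int foo; };	/* hypothetical per-device state */

	static int my_probe(struct i2c_client *client,
			    const struct i2c_device_id *id)
	{
		struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

		if (!data)
			return -ENOMEM;
		i2c_set_clientdata(client, data);	/* stash the pointer */
		return 0;
	}

	static int my_remove(struct i2c_client *client)
	{
		struct my_data *data = i2c_get_clientdata(client);

		kfree(data);
		/* since 2.6.34 no i2c_set_clientdata(client, NULL) needed */
		return 0;
	}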
@@ -333,14 +333,14 @@ byte 0:
 byte 1:
 
    bit   7   6   5   4   3   2   1   0
-        x15 x14 x13 x12 x11 x10  x9  x8
+         .   .   .   .   .  x10  x9  x8
 
 byte 2:
 
    bit   7   6   5   4   3   2   1   0
         x7  x6  x5  x4  x3  x2  x1  x0
 
-x15..x0 = absolute x value (horizontal)
+x10..x0 = absolute x value (horizontal)
 
 byte 3:
 

@@ -350,14 +350,14 @@ byte 3:
 byte 4:
 
    bit   7   6   5   4   3   2   1   0
-        y15 y14 y13 y12 y11 y10  y9  y8
+         .   .   .   .   .   .   y9  y8
 
 byte 5:
 
    bit   7   6   5   4   3   2   1   0
         y7  y6  y5  y4  y3  y2  y1  y0
 
-y15..y0 = absolute y value (vertical)
+y9..y0 = absolute y value (vertical)
 
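Read together, the corrected tables say x is an 11-bit value split across bytes 1-2 and y a 10-bit value split across bytes 4-5. A sketch of the corresponding extraction (function and buffer names are hypothetical):

	#include <stdint.h>

	/* Per the tables above: x10..x8 live in byte 1, x7..x0 in byte 2;
	 * y9..y8 in byte 4, y7..y0 in byte 5.
	 */
	static void decode_abs(const uint8_t packet[6], int *x, int *y)
	{
		*x = ((packet[1] & 0x07) << 8) | packet[2];
		*y = ((packet[4] & 0x03) << 8) | packet[5];
	}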
 
 4.2.2 Two finger touch
 
@@ -165,8 +165,8 @@ the user entry_handler invocation is also skipped.
 
 1.4 How Does Jump Optimization Work?
 
-If you configured your kernel with CONFIG_OPTPROBES=y (currently
-this option is supported on x86/x86-64, non-preemptive kernel) and
+If your kernel is built with CONFIG_OPTPROBES=y (currently this flag
+is automatically set 'y' on x86/x86-64, non-preemptive kernel) and
 the "debug.kprobes_optimization" kernel parameter is set to 1 (see
 sysctl(8)), Kprobes tries to reduce probe-hit overhead by using a jump
 instruction instead of a breakpoint instruction at each probepoint.
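For reference, a minimal kprobe of the kind this optimization applies to, using the standard register_kprobe() interface (the probed symbol is only an example):

	#include <linux/module.h>
	#include <linux/kprobes.h>

	static int pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("hit %s\n", p->symbol_name);	/* runs before the insn */
		return 0;
	}

	static struct kprobe kp = {
		.symbol_name = "do_fork",	/* example probepoint */
		.pre_handler = pre,
	};

	static int __init kp_init(void)
	{
		/* may be transparently jump-optimized as described above */
		return register_kprobe(&kp);
	}

	static void __exit kp_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(kp_init);
	module_exit(kp_exit);
	MODULE_LICENSE("GPL");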
@@ -271,8 +271,6 @@ tweak the kernel's execution path, you need to suppress optimization,
 using one of the following techniques:
 - Specify an empty function for the kprobe's post_handler or break_handler.
  or
-- Config CONFIG_OPTPROBES=n.
- or
 - Execute 'sysctl -w debug.kprobes_optimization=n'
 
 2. Architectures Supported

@@ -307,10 +305,6 @@ it useful to "Compile the kernel with debug info" (CONFIG_DEBUG_INFO),
 so you can use "objdump -d -l vmlinux" to see the source-to-object
 code mapping.
 
-If you want to reduce probing overhead, set "Kprobes jump optimization
-support" (CONFIG_OPTPROBES) to "y". You can find this option under the
-"Kprobes" line.
-
 4. API Reference
 
 The Kprobes API includes a "register" function and an "unregister"
@@ -58,7 +58,7 @@ static void transfer(int fd)
 	};
 
 	ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
-	if (ret == 1)
+	if (ret < 1)
 		pabort("can't send spi message");
 
 	for (ret = 0; ret < ARRAY_SIZE(tx); ret++) {
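(Assuming the usual spidev semantics, SPI_IOC_MESSAGE returns the number of bytes transferred on success and -1 on error, so the old "== 1" test never fired on a real failure and would even flag a successful one-byte transfer as an error; "< 1" is the correct failure check.)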
@@ -40,7 +40,9 @@ Synopsis of kprobe_events
   $stack	: Fetch stack address.
   $retval	: Fetch return value.(*)
   +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
-  NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
+  NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
+  FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
+		  (u8/u16/u32/u64/s8/s16/s32/s64) are supported.
 
   (*) only for return probe.
   (**) this is useful for fetching a field of data structures.
MAINTAINERS (10 lines changed)

@@ -4353,13 +4353,13 @@ M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
 M:	Arnaldo Carvalho de Melo <acme@redhat.com>
 S:	Supported
-F:	kernel/perf_event.c
+F:	kernel/perf_event*.c
 F:	include/linux/perf_event.h
-F:	arch/*/kernel/perf_event.c
-F:	arch/*/kernel/*/perf_event.c
-F:	arch/*/kernel/*/*/perf_event.c
+F:	arch/*/kernel/perf_event*.c
+F:	arch/*/kernel/*/perf_event*.c
+F:	arch/*/kernel/*/*/perf_event*.c
 F:	arch/*/include/asm/perf_event.h
-F:	arch/*/lib/perf_event.c
+F:	arch/*/lib/perf_event*.c
 F:	arch/*/kernel/perf_callchain.c
 F:	tools/perf/
 
arch/Kconfig (20 lines changed)

@@ -42,15 +42,10 @@ config KPROBES
 	  If in doubt, say "N".
 
 config OPTPROBES
-	bool "Kprobes jump optimization support (EXPERIMENTAL)"
-	default y
-	depends on KPROBES
+	def_bool y
+	depends on KPROBES && HAVE_OPTPROBES
 	depends on !PREEMPT
-	depends on HAVE_OPTPROBES
-	select KALLSYMS_ALL
-	help
-	  This option will allow kprobes to optimize breakpoint to
-	  a jump for reducing its overhead.
 
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	bool

@@ -142,6 +137,17 @@ config HAVE_HW_BREAKPOINT
 	bool
 	depends on PERF_EVENTS
 
+config HAVE_MIXED_BREAKPOINTS_REGS
+	bool
+	depends on HAVE_HW_BREAKPOINT
+	help
+	  Depending on the arch implementation of hardware breakpoints,
+	  some of them have separate registers for data and instruction
+	  breakpoints addresses, others have mixed registers to store
+	  them but define the access type in a control register.
+	  Select this option if your arch implements breakpoints under the
+	  latter fashion.
+
 config HAVE_USER_RETURN_NOTIFIER
 	bool
 
@@ -46,7 +46,7 @@ struct ehci_hcd_omap_platform_data {
 struct omap_musb_board_data {
 	u8	interface_type;
 	u8	mode;
-	u8	power;
+	u16	power;
 };
 
 enum musb_interface    {MUSB_INTERFACE_ULPI, MUSB_INTERFACE_UTMI};
@@ -49,7 +49,7 @@ config AR7
 	  family: TNETD7100, 7200 and 7300.
 
 config BCM47XX
-	bool "BCM47XX based boards"
+	bool "Broadcom BCM47XX based boards"
 	select CEVT_R4K
 	select CSRC_R4K
 	select DMA_NONCOHERENT

@@ -509,6 +509,7 @@ config SIBYTE_SWARM
 	bool "Sibyte BCM91250A-SWARM"
 	select BOOT_ELF32
 	select DMA_COHERENT
+	select HAVE_PATA_PLATFORM
 	select NR_CPUS_DEFAULT_2
 	select SIBYTE_SB1250
 	select SWAP_IO_SPACE

@@ -523,6 +524,7 @@ config SIBYTE_LITTLESUR
 	depends on EXPERIMENTAL
 	select BOOT_ELF32
 	select DMA_COHERENT
+	select HAVE_PATA_PLATFORM
 	select NR_CPUS_DEFAULT_2
 	select SIBYTE_SB1250
 	select SWAP_IO_SPACE

@@ -1305,6 +1307,33 @@ config CPU_CAVIUM_OCTEON
 
 endchoice
 
+if CPU_LOONGSON2F
+config CPU_NOP_WORKAROUNDS
+	bool
+
+config CPU_JUMP_WORKAROUNDS
+	bool
+
+config CPU_LOONGSON2F_WORKAROUNDS
+	bool "Loongson 2F Workarounds"
+	default y
+	select CPU_NOP_WORKAROUNDS
+	select CPU_JUMP_WORKAROUNDS
+	help
+	  Loongson 2F01 / 2F02 processors have the NOP & JUMP issues which
+	  require workarounds.  Without workarounds the system may hang
+	  unexpectedly.  For more information please refer to the gas
+	  -mfix-loongson2f-nop and -mfix-loongson2f-jump options.
+
+	  Loongson 2F03 and later have fixed these issues and no workarounds
+	  are needed.  The workarounds have no significant side effect on them
+	  but may decrease the performance of the system so this option should
+	  be disabled unless the kernel is intended to be run on 2F01 or 2F02
+	  systems.
+
+	  If unsure, please say Y.
+endif # CPU_LOONGSON2F
+
 config SYS_SUPPORTS_ZBOOT
 	bool
 	select HAVE_KERNEL_GZIP
@@ -136,6 +136,19 @@ cflags-$(CONFIG_CPU_LOONGSON2E) += \
 			$(call cc-option,-march=loongson2e,-march=r4600)
 cflags-$(CONFIG_CPU_LOONGSON2F) += \
 			$(call cc-option,-march=loongson2f,-march=r4600)
+# enable the workarounds for loongson2f
+ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
+  ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-nop,),)
+    $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-nop)
+  else
+    cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-nop
+  endif
+  ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-jump,),)
+    $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-jump)
+  else
+    cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-jump
+  endif
+endif
 
 cflags-$(CONFIG_CPU_MIPS32_R1)	+= $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 			-Wa,-mips32 -Wa,--trap
@@ -66,12 +66,16 @@ static int __init db1200_arch_init(void)
 	set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
 	bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
 
-	/* do not autoenable these: CPLD has broken edge int handling,
-	 * and the CD handler setup requires manual enabling to work
-	 * around that.
+	/* insert/eject pairs: one of both is always screaming.  To avoid
+	 * issues they must not be automatically enabled when initially
+	 * requested.
 	 */
 	irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN;
 	irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN;
+	irq_to_desc(DB1200_PC0_INSERT_INT)->status |= IRQ_NOAUTOEN;
+	irq_to_desc(DB1200_PC0_EJECT_INT)->status |= IRQ_NOAUTOEN;
+	irq_to_desc(DB1200_PC1_INSERT_INT)->status |= IRQ_NOAUTOEN;
+	irq_to_desc(DB1200_PC1_EJECT_INT)->status |= IRQ_NOAUTOEN;
 
 	return 0;
 }
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.30-rc6
# Sun May 31 20:17:18 2009
# Linux kernel version: 2.6.34-rc2
# Tue Mar 23 10:36:32 2010
#
CONFIG_MIPS=y

@@ -9,13 +9,14 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
# CONFIG_AR7 is not set
# CONFIG_BCM47XX is not set
CONFIG_BCM63XX=y
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_LASAT is not set
# CONFIG_LEMOTE_FULONG is not set
# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
# CONFIG_NEC_MARKEINS is not set

@@ -26,6 +27,7 @@ CONFIG_BCM63XX=y
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP28 is not set

@@ -45,13 +47,17 @@ CONFIG_BCM63XX=y
# CONFIG_WR_PPMC is not set
# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
# CONFIG_ALCHEMY_GPIO_INDIRECT is not set

#
# CPU support
#
CONFIG_BCM63XX_CPU_6338=y
CONFIG_BCM63XX_CPU_6345=y
CONFIG_BCM63XX_CPU_6348=y
CONFIG_BCM63XX_CPU_6358=y
CONFIG_BOARD_BCM963XX=y
CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set

@@ -69,10 +75,8 @@ CONFIG_CEVT_R4K=y
CONFIG_CSRC_R4K_LIB=y
CONFIG_CSRC_R4K=y
CONFIG_DMA_NONCOHERENT=y
CONFIG_DMA_NEED_PCI_MAP_STATE=y
CONFIG_EARLY_PRINTK=y
CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_SYS_HAS_EARLY_PRINTK=y
# CONFIG_HOTPLUG_CPU is not set
# CONFIG_NO_IOPORT is not set
CONFIG_GENERIC_GPIO=y
CONFIG_CPU_BIG_ENDIAN=y

@@ -85,7 +89,8 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
# CONFIG_CPU_LOONGSON2 is not set
# CONFIG_CPU_LOONGSON2E is not set
# CONFIG_CPU_LOONGSON2F is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set

@@ -128,7 +133,7 @@ CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
CONFIG_CPU_HAS_LLSC=y
# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y

@@ -146,9 +151,8 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
# CONFIG_HIGH_RES_TIMERS is not set

@@ -170,6 +174,7 @@ CONFIG_PREEMPT_NONE=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y

#
# General setup

@@ -189,15 +194,12 @@ CONFIG_LOCALVERSION=""
#
# RCU Subsystem
#
CONFIG_CLASSIC_RCU=y
# CONFIG_TREE_RCU is not set
# CONFIG_PREEMPT_RCU is not set
# CONFIG_TREE_PREEMPT_RCU is not set
CONFIG_TINY_RCU=y
# CONFIG_TREE_RCU_TRACE is not set
# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_RELAY is not set

@@ -205,11 +207,11 @@ CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_BLK_DEV_INITRD is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y

@@ -223,6 +225,10 @@ CONFIG_BASE_FULL=y
# CONFIG_EVENTFD is not set
# CONFIG_SHMEM is not set
# CONFIG_AIO is not set

#
# Kernel Performance Events And Counters
#
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_PCI_QUIRKS=y
# CONFIG_SLUB_DEBUG is not set

@@ -231,14 +237,17 @@ CONFIG_COMPAT_BRK=y
CONFIG_SLUB=y
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y

#
# GCOV-based kernel profiling
#
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
CONFIG_BLOCK=y
# CONFIG_LBD is not set
CONFIG_LBDAF=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set

@@ -246,14 +255,41 @@ CONFIG_BLOCK=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
# CONFIG_IOSCHED_AS is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
# CONFIG_INLINE_SPIN_TRYLOCK is not set
# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
# CONFIG_INLINE_SPIN_LOCK is not set
# CONFIG_INLINE_SPIN_LOCK_BH is not set
# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
CONFIG_INLINE_SPIN_UNLOCK=y
# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
# CONFIG_INLINE_READ_TRYLOCK is not set
# CONFIG_INLINE_READ_LOCK is not set
# CONFIG_INLINE_READ_LOCK_BH is not set
# CONFIG_INLINE_READ_LOCK_IRQ is not set
# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
CONFIG_INLINE_READ_UNLOCK=y
# CONFIG_INLINE_READ_UNLOCK_BH is not set
CONFIG_INLINE_READ_UNLOCK_IRQ=y
# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
# CONFIG_INLINE_WRITE_TRYLOCK is not set
# CONFIG_INLINE_WRITE_LOCK is not set
# CONFIG_INLINE_WRITE_LOCK_BH is not set
# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
CONFIG_INLINE_WRITE_UNLOCK=y
# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set

#

@@ -263,15 +299,12 @@ CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_STUB is not set
# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
CONFIG_PCCARD=y
# CONFIG_PCMCIA_DEBUG is not set
CONFIG_PCMCIA=y
CONFIG_PCMCIA_LOAD_CIS=y
CONFIG_PCMCIA_IOCTL=y
CONFIG_CARDBUS=y

#

@@ -295,6 +328,7 @@ CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PM is not set
CONFIG_NET=y

@@ -333,6 +367,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set

@@ -347,6 +382,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set

@@ -359,7 +395,27 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_WIRELESS is not set
CONFIG_WIRELESS=y
CONFIG_WEXT_CORE=y
CONFIG_WEXT_PROC=y
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
# CONFIG_CFG80211_REG_DEBUG is not set
CONFIG_CFG80211_DEFAULT_PS=y
# CONFIG_CFG80211_INTERNAL_REGDB is not set
CONFIG_CFG80211_WEXT=y
CONFIG_WIRELESS_EXT_SYSFS=y
# CONFIG_LIB80211 is not set
CONFIG_MAC80211=y
# CONFIG_MAC80211_RC_PID is not set
CONFIG_MAC80211_RC_MINSTREL=y
# CONFIG_MAC80211_RC_DEFAULT_PID is not set
CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
CONFIG_MAC80211_RC_DEFAULT="minstrel"
# CONFIG_MAC80211_MESH is not set
CONFIG_MAC80211_LEDS=y
# CONFIG_MAC80211_DEBUG_MENU is not set
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set

@@ -471,6 +527,7 @@ CONFIG_HAVE_IDE=y
#
# SCSI device support
#
CONFIG_SCSI_MOD=y
# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
# CONFIG_SCSI_DMA is not set

@@ -484,13 +541,16 @@ CONFIG_HAVE_IDE=y
#

#
# Enable only one of the two stacks, unless you know what you are doing
# You can enable one or both FireWire driver stacks.
#

#
# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set

@@ -529,6 +589,7 @@ CONFIG_MII=y
# CONFIG_SMC91X is not set
# CONFIG_DM9000 is not set
# CONFIG_ETHOC is not set
# CONFIG_SMSC911X is not set
# CONFIG_DNET is not set
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set

@@ -541,17 +602,48 @@ CONFIG_MII=y
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_NET_PCI is not set
# CONFIG_B44 is not set
# CONFIG_KS8842 is not set
# CONFIG_KS8851_MLL is not set
# CONFIG_ATL2 is not set
CONFIG_BCM63XX_ENET=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set

#
# Wireless LAN
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
CONFIG_WLAN=y
# CONFIG_PCMCIA_RAYCS is not set
# CONFIG_LIBERTAS_THINFIRM is not set
# CONFIG_ATMEL is not set
# CONFIG_AT76C50X_USB is not set
# CONFIG_AIRO_CS is not set
# CONFIG_PCMCIA_WL3501 is not set
# CONFIG_PRISM54 is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
# CONFIG_RTL8180 is not set
# CONFIG_RTL8187 is not set
# CONFIG_ADM8211 is not set
# CONFIG_MAC80211_HWSIM is not set
# CONFIG_MWL8K is not set
# CONFIG_ATH_COMMON is not set
CONFIG_B43=y
CONFIG_B43_PCI_AUTOSELECT=y
CONFIG_B43_PCICORE_AUTOSELECT=y
# CONFIG_B43_PCMCIA is not set
CONFIG_B43_PIO=y
# CONFIG_B43_PHY_LP is not set
CONFIG_B43_LEDS=y
# CONFIG_B43_DEBUG is not set
# CONFIG_B43LEGACY is not set
# CONFIG_HOSTAP is not set
# CONFIG_IPW2100 is not set
# CONFIG_IPW2200 is not set
# CONFIG_IWLWIFI is not set
# CONFIG_LIBERTAS is not set
# CONFIG_HERMES is not set
# CONFIG_P54_COMMON is not set
# CONFIG_RT2X00 is not set
# CONFIG_WL12XX is not set
# CONFIG_ZD1211RW is not set

#
# Enable WiMAX (Networking options) to see the WiMAX drivers

@@ -574,6 +666,7 @@ CONFIG_BCM63XX_ENET=y
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set

@@ -607,6 +700,7 @@ CONFIG_BCM63XX_ENET=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
# CONFIG_SERIAL_TIMBERDALE is not set
CONFIG_SERIAL_BCM63XX=y
CONFIG_SERIAL_BCM63XX_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set

@@ -629,6 +723,11 @@ CONFIG_LEGACY_PTY_COUNT=256
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
# CONFIG_SPI is not set

#
# PPS support
#
# CONFIG_PPS is not set
CONFIG_ARCH_REQUIRE_GPIOLIB=y
CONFIG_GPIOLIB=y
# CONFIG_GPIO_SYSFS is not set

@@ -636,6 +735,8 @@ CONFIG_GPIOLIB=y
#
# Memory mapped GPIO expanders:
#
# CONFIG_GPIO_IT8761E is not set
# CONFIG_GPIO_SCH is not set

#
# I2C GPIO expanders:

@@ -644,16 +745,21 @@ CONFIG_GPIOLIB=y
#
# PCI GPIO expanders:
#
# CONFIG_GPIO_CS5535 is not set
# CONFIG_GPIO_BT8XX is not set
# CONFIG_GPIO_LANGWELL is not set

#
# SPI GPIO expanders:
#

#
# AC97 GPIO expanders:
#
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y

@@ -662,15 +768,16 @@ CONFIG_SSB_POSSIBLE=y
#
CONFIG_SSB=y
CONFIG_SSB_SPROM=y
CONFIG_SSB_BLOCKIO=y
CONFIG_SSB_PCIHOST_POSSIBLE=y
CONFIG_SSB_PCIHOST=y
# CONFIG_SSB_B43_PCI_BRIDGE is not set
CONFIG_SSB_B43_PCI_BRIDGE=y
CONFIG_SSB_PCMCIAHOST_POSSIBLE=y
# CONFIG_SSB_PCMCIAHOST is not set
# CONFIG_SSB_SILENT is not set
# CONFIG_SSB_DEBUG is not set
CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
# CONFIG_SSB_DRIVER_PCICORE is not set
CONFIG_SSB_DRIVER_PCICORE=y
# CONFIG_SSB_DRIVER_MIPS is not set

#

@@ -680,27 +787,15 @@ CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_TIMBERDALE is not set
# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set

#
# Multimedia devices
#

#
# Multimedia core support
#
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
# CONFIG_VIDEO_MEDIA is not set

#
# Multimedia drivers
#
# CONFIG_DAB is not set
# CONFIG_MEDIA_SUPPORT is not set

#
# Graphics support
#
# CONFIG_VGA_ARB is not set
# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set

@@ -710,11 +805,7 @@ CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
#
# Display device support
#
CONFIG_DISPLAY_SUPPORT=y

#
# Display hardware drivers
#
# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_SOUND is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y

@@ -741,13 +832,14 @@ CONFIG_USB=y
# USB Host Controller Drivers
#
# CONFIG_USB_C67X00_HCD is not set
# CONFIG_USB_XHCI_HCD is not set
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y
# CONFIG_USB_OXU210HP_HCD is not set
# CONFIG_USB_ISP116X_HCD is not set
# CONFIG_USB_ISP1760_HCD is not set
# CONFIG_USB_ISP1362_HCD is not set
CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_HCD_SSB is not set
CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y

@@ -796,7 +888,6 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set

@@ -807,8 +898,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_LD is not set
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set

#

@@ -819,7 +910,29 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y

#
# LED drivers
#
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_GPIO_PLATFORM=y
# CONFIG_LEDS_LT3593 is not set
CONFIG_LEDS_TRIGGERS=y

#
# LED Triggers
#
CONFIG_LEDS_TRIGGER_TIMER=y
# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
CONFIG_LEDS_TRIGGER_GPIO=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y

#
# iptables trigger is under Netfilter config (LED target)
#
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
CONFIG_RTC_LIB=y

@@ -827,6 +940,10 @@ CONFIG_RTC_LIB=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set

#
# TI VLYNQ
#
# CONFIG_STAGING is not set

#

@@ -838,12 +955,16 @@ CONFIG_RTC_LIB=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
# CONFIG_FILE_LOCKING is not set
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
# CONFIG_NILFS2_FS is not set
# CONFIG_FILE_LOCKING is not set
CONFIG_FSNOTIFY=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY is not set
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set

@@ -875,8 +996,6 @@ CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
CONFIG_MISC_FILESYSTEMS=y

@@ -888,6 +1007,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_JFFS2_FS is not set
# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set

@@ -898,7 +1018,6 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
# CONFIG_NILFS2_FS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set

#

@@ -906,7 +1025,46 @@ CONFIG_MISC_FILESYSTEMS=y
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
# CONFIG_NLS is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_437 is not set
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
# CONFIG_NLS_CODEPAGE_852 is not set
# CONFIG_NLS_CODEPAGE_855 is not set
# CONFIG_NLS_CODEPAGE_857 is not set
# CONFIG_NLS_CODEPAGE_860 is not set
# CONFIG_NLS_CODEPAGE_861 is not set
# CONFIG_NLS_CODEPAGE_862 is not set
# CONFIG_NLS_CODEPAGE_863 is not set
# CONFIG_NLS_CODEPAGE_864 is not set
# CONFIG_NLS_CODEPAGE_865 is not set
# CONFIG_NLS_CODEPAGE_866 is not set
# CONFIG_NLS_CODEPAGE_869 is not set
# CONFIG_NLS_CODEPAGE_936 is not set
# CONFIG_NLS_CODEPAGE_950 is not set
# CONFIG_NLS_CODEPAGE_932 is not set
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ASCII is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
# CONFIG_NLS_ISO8859_3 is not set
# CONFIG_NLS_ISO8859_4 is not set
# CONFIG_NLS_ISO8859_5 is not set
# CONFIG_NLS_ISO8859_6 is not set
# CONFIG_NLS_ISO8859_7 is not set
# CONFIG_NLS_ISO8859_9 is not set
# CONFIG_NLS_ISO8859_13 is not set
# CONFIG_NLS_ISO8859_14 is not set
# CONFIG_NLS_ISO8859_15 is not set
# CONFIG_NLS_KOI8_R is not set
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set

#

@@ -918,29 +1076,23 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_TRACING_SUPPORT=y

#
# Tracers
#
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_EVENT_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_TRACE_BRANCH_PROFILING is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_FTRACE is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
CONFIG_EARLY_PRINTK=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
# CONFIG_CMDLINE_OVERRIDE is not set

@@ -951,8 +1103,108 @@ CONFIG_CMDLINE="console=ttyS0,115200"
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
# CONFIG_CRYPTO is not set
# CONFIG_DEFAULT_SECURITY_SELINUX is not set
# CONFIG_DEFAULT_SECURITY_SMACK is not set
# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y

#
# Crypto core or helper
#
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=y
CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set

#
# Authenticated Encryption with Associated Data
#
# CONFIG_CRYPTO_CCM is not set
# CONFIG_CRYPTO_GCM is not set
# CONFIG_CRYPTO_SEQIV is not set

#
# Block modes
#
# CONFIG_CRYPTO_CBC is not set
# CONFIG_CRYPTO_CTR is not set
# CONFIG_CRYPTO_CTS is not set
CONFIG_CRYPTO_ECB=y
# CONFIG_CRYPTO_LRW is not set
# CONFIG_CRYPTO_PCBC is not set
# CONFIG_CRYPTO_XTS is not set

#
# Hash modes
#
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_XCBC is not set
# CONFIG_CRYPTO_VMAC is not set

#
# Digest
#
# CONFIG_CRYPTO_CRC32C is not set
# CONFIG_CRYPTO_GHASH is not set
# CONFIG_CRYPTO_MD4 is not set
# CONFIG_CRYPTO_MD5 is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
# CONFIG_CRYPTO_RMD128 is not set
# CONFIG_CRYPTO_RMD160 is not set
# CONFIG_CRYPTO_RMD256 is not set
# CONFIG_CRYPTO_RMD320 is not set
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
# CONFIG_CRYPTO_TGR192 is not set
# CONFIG_CRYPTO_WP512 is not set

#
# Ciphers
#
CONFIG_CRYPTO_AES=y
# CONFIG_CRYPTO_ANUBIS is not set
CONFIG_CRYPTO_ARC4=y
# CONFIG_CRYPTO_BLOWFISH is not set
# CONFIG_CRYPTO_CAMELLIA is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
# CONFIG_CRYPTO_DES is not set
# CONFIG_CRYPTO_FCRYPT is not set
# CONFIG_CRYPTO_KHAZAD is not set
# CONFIG_CRYPTO_SALSA20 is not set
# CONFIG_CRYPTO_SEED is not set
# CONFIG_CRYPTO_SERPENT is not set
# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_TWOFISH is not set

#
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set

#
# Random Number Generation
#
CONFIG_CRYPTO_ANSI_CPRNG=y
# CONFIG_CRYPTO_HW is not set
# CONFIG_BINARY_PRINTF is not set

#
@@ -16,7 +16,7 @@
 ({									\
 	__typeof(*(m)) __ret;						\
 									\
-	if (kernel_uses_llsc && R10000_LLSC_WAR) {		\
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
@@ -307,7 +307,7 @@ extern unsigned long _loongson_addrwincfg_base;
  */
 #define LOONGSON_ADDRWIN_CFG(s, d, w, src, dst, size) do {\
 	s##_WIN##w##_BASE = (src); \
-	s##_WIN##w##_MMAP = (src) | ADDRWIN_MAP_DST_##d; \
+	s##_WIN##w##_MMAP = (dst) | ADDRWIN_MAP_DST_##d; \
 	s##_WIN##w##_MASK = ~(size-1); \
 } while (0)
 
@@ -120,9 +120,14 @@
 #endif
 #define FIRST_USER_ADDRESS	0UL
 
-#define VMALLOC_START		MAP_BASE
+/*
+ * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
+ * the first couple of pages so NULL pointer dereferences will still
+ * reliably trap.
+ */
+#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
 #define VMALLOC_END	\
-	(MAP_BASE + \
+	(VMALLOC_START + \
 	min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
 	    (1UL << cpu_vmbits)) - (1UL << 32))
 
@@ -142,9 +142,9 @@ extern int ptrace_set_watch_regs(struct task_struct *child,
 
 extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
 
-extern NORET_TYPE void die(const char *, const struct pt_regs *) ATTRIB_NORET;
+extern NORET_TYPE void die(const char *, struct pt_regs *) ATTRIB_NORET;
 
-static inline void die_if_kernel(const char *str, const struct pt_regs *regs)
+static inline void die_if_kernel(const char *str, struct pt_regs *regs)
 {
 	if (unlikely(!user_mode(regs)))
 		die(str, regs);
@@ -121,7 +121,7 @@
 		.endm
 #else
 		.macro	get_saved_sp	/* Uniprocessor variation */
-#ifdef CONFIG_CPU_LOONGSON2F
+#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
 		/*
 		 * Clear BTB (branch target buffer), forbid RAS (return address
 		 * stack) to workaround the Out-of-order Issue in Loongson2F
@@ -167,6 +167,24 @@ static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
 #define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1)
 #define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3)
 
+static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
+				    unsigned int a2, unsigned int a3)
+{
+	if (a3 < 32)
+		uasm_i_dsrl(p, a1, a2, a3);
+	else
+		uasm_i_dsrl32(p, a1, a2, a3 - 32);
+}
+
+static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1,
+				    unsigned int a2, unsigned int a3)
+{
+	if (a3 < 32)
+		uasm_i_dsll(p, a1, a2, a3);
+	else
+		uasm_i_dsll32(p, a1, a2, a3 - 32);
+}
+
 /* Handle relocations. */
 struct uasm_reloc {
 	u32 *addr;
@@ -76,15 +76,9 @@ void __init plat_mem_setup(void)
 
 #ifdef CONFIG_VT
 	screen_info = (struct screen_info) {
-		0, 0,	/* orig-x, orig-y */
-		0,	/* unused */
-		0,	/* orig_video_page */
-		0,	/* orig_video_mode */
-		160,	/* orig_video_cols */
-		0, 0, 0,	/* unused, ega_bx, unused */
-		64,	/* orig_video_lines */
-		0,	/* orig_video_isVGA */
-		16	/* orig_video_points */
+		.orig_video_cols	= 160,
+		.orig_video_lines	= 64,
+		.orig_video_points	= 16,
 	};
 #endif
 
@@ -352,9 +352,10 @@ void show_registers(const struct pt_regs *regs)
 
 static DEFINE_SPINLOCK(die_lock);
 
-void __noreturn die(const char * str, const struct pt_regs * regs)
+void __noreturn die(const char * str, struct pt_regs * regs)
 {
 	static int die_counter;
+	int sig = SIGSEGV;
 #ifdef CONFIG_MIPS_MT_SMTC
 	unsigned long dvpret = dvpe();
 #endif /* CONFIG_MIPS_MT_SMTC */

@@ -365,6 +366,10 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
 #ifdef CONFIG_MIPS_MT_SMTC
 	mips_mt_regdump(dvpret);
 #endif /* CONFIG_MIPS_MT_SMTC */
+
+	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+		sig = 0;
+
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	add_taint(TAINT_DIE);

@@ -379,7 +384,7 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
 		panic("Fatal exception");
 	}
 
-	do_exit(SIGSEGV);
+	do_exit(sig);
 }
 
 extern struct exception_table_entry __start___dbe_table[];

@@ -1557,12 +1562,7 @@ static char panic_null_cerr[] __cpuinitdata =
 void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
 	unsigned long size)
 {
-#ifdef CONFIG_32BIT
-	unsigned long uncached_ebase = KSEG1ADDR(ebase);
-#endif
-#ifdef CONFIG_64BIT
-	unsigned long uncached_ebase = TO_UNCAC(ebase);
-#endif
+	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
 
 	if (!addr)
 		panic(panic_null_cerr);
@@ -24,7 +24,7 @@ static const char *system_types[] = {
 	[MACH_LEMOTE_FL2F]      "lemote-fuloong-2f-box",
 	[MACH_LEMOTE_ML2F7]     "lemote-mengloong-2f-7inches",
 	[MACH_LEMOTE_YL2F89]    "lemote-yeeloong-2f-8.9inches",
-	[MACH_DEXXON_GDIUM2F10] "dexxon-gidum-2f-10inches",
+	[MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f",
 	[MACH_LEMOTE_NAS]       "lemote-nas-2f",
 	[MACH_LEMOTE_LL2F]      "lemote-lynloong-2f",
 	[MACH_LOONGSON_END]     NULL,
@@ -75,7 +75,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 	unsigned long end = offset + size;
 
 	if (__uncached_access(file, offset)) {
-		if (((uca_start && offset) >= uca_start) &&
+		if (uca_start && (offset >= uca_start) &&
 		    (end <= uca_end))
 			return __pgprot((pgprot_val(vma_prot) &
 					 ~_CACHE_MASK) |

@@ -96,7 +96,7 @@ static int __init find_vga_mem_init(void)
 		return 0;
 
 	for_each_pci_dev(dev) {
-		if ((dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
+		if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
 			for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
 				r = &dev->resource[idx];
 				if (!r->start && r->end)
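(The first hunk fixes an operator-precedence bug: in the old form the logical AND binds first, so "(uca_start && offset)" collapses to 0 or 1 before the ">= uca_start" comparison, and the range test almost never held; the corrected form only honors offsets inside the configured [uca_start, uca_end] window.)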
@@ -16,13 +16,31 @@
 
 #include <loongson.h>
 
+static inline void loongson_reboot(void)
+{
+#ifndef CONFIG_CPU_JUMP_WORKAROUNDS
+	((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4)) ();
+#else
+	void (*func)(void);
+
+	func = (void *)ioremap_nocache(LOONGSON_BOOT_BASE, 4);
+
+	__asm__ __volatile__(
+	"	.set noat	\n"
+	"	jr	%[func]	\n"
+	"	.set at		\n"
+	: /* No outputs */
+	: [func] "r" (func));
+#endif
+}
+
 static void loongson_restart(char *command)
 {
 	/* do preparation for reboot */
 	mach_prepare_reboot();
 
 	/* reboot via jumping to boot base address */
-	((void (*)(void))ioremap_nocache(LOONGSON_BOOT_BASE, 4)) ();
+	loongson_reboot();
 }
 
 static void loongson_poweroff(void)
@@ -41,15 +41,12 @@ void __init plat_mem_setup(void)
 		conswitchp = &vga_con;
 
 		screen_info = (struct screen_info) {
-			0, 25,			/* orig-x, orig-y */
-			0,			/* unused */
-			0,			/* orig-video-page */
-			0,			/* orig-video-mode */
-			80,			/* orig-video-cols */
-			0, 0, 0,		/* ega_ax, ega_bx, ega_cx */
-			25,			/* orig-video-lines */
-			VIDEO_TYPE_VGAC,	/* orig-video-isVGA */
-			16			/* orig-video-points */
+			.orig_x			= 0,
+			.orig_y			= 25,
+			.orig_video_cols	= 80,
+			.orig_video_lines	= 25,
+			.orig_video_isVGA	= VIDEO_TYPE_VGAC,
+			.orig_video_points	= 16,
 		};
 #elif defined(CONFIG_DUMMY_CONSOLE)
 		conswitchp = &dummy_con;
@@ -79,7 +79,7 @@ void mach_irq_dispatch(unsigned int pending)
 	if (pending & CAUSEF_IP7)
 		do_IRQ(LOONGSON_TIMER_IRQ);
 	else if (pending & CAUSEF_IP6) {	/* North Bridge, Perf counter */
-#ifdef CONFIG_OPROFILE
+#if defined(CONFIG_OPROFILE) || defined(CONFIG_OPROFILE_MODULE)
 		do_IRQ(LOONGSON2_PERFCNT_IRQ);
 #endif
 		bonito_irqdispatch();
@ -31,6 +31,16 @@
|
|||
#include <asm/war.h>
|
||||
#include <asm/uasm.h>
|
||||
|
||||
/*
|
||||
* TLB load/store/modify handlers.
|
||||
*
|
||||
* Only the fastpath gets synthesized at runtime, the slowpath for
|
||||
* do_page_fault remains normal asm.
|
||||
*/
|
||||
extern void tlb_do_page_fault_0(void);
|
||||
extern void tlb_do_page_fault_1(void);
|
||||
|
||||
|
||||
static inline int r45k_bvahwbug(void)
|
||||
{
|
||||
/* XXX: We should probe for the presence of this bug, but we don't. */
|
||||
|
@ -83,6 +93,7 @@ enum label_id {
|
|||
label_nopage_tlbm,
|
||||
label_smp_pgtable_change,
|
||||
label_r3000_write_probe_fail,
|
||||
label_large_segbits_fault,
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
label_tlb_huge_update,
|
||||
#endif
|
||||
|
@ -101,6 +112,7 @@ UASM_L_LA(_nopage_tlbs)
|
|||
UASM_L_LA(_nopage_tlbm)
|
||||
UASM_L_LA(_smp_pgtable_change)
|
||||
UASM_L_LA(_r3000_write_probe_fail)
|
||||
UASM_L_LA(_large_segbits_fault)
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
UASM_L_LA(_tlb_huge_update)
|
||||
#endif
|
||||
|
@ -157,6 +169,10 @@ static u32 tlb_handler[128] __cpuinitdata;
|
|||
static struct uasm_label labels[128] __cpuinitdata;
|
||||
static struct uasm_reloc relocs[128] __cpuinitdata;
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
static int check_for_high_segbits __cpuinitdata;
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
|
||||
/*
|
||||
* CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
|
||||
|
@ -408,7 +424,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
|
|||
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
|
||||
} else {
|
||||
#ifdef CONFIG_64BIT_PHYS_ADDR
|
||||
uasm_i_dsrl(p, reg, reg, ilog2(_PAGE_GLOBAL));
|
||||
uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
|
||||
#else
|
||||
UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
|
||||
#endif
|
||||
|
@ -532,7 +548,24 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
|
|||
* The vmalloc handling is not in the hotpath.
|
||||
*/
|
||||
uasm_i_dmfc0(p, tmp, C0_BADVADDR);
|
||||
uasm_il_bltz(p, r, tmp, label_vmalloc);
|
||||
|
||||
if (check_for_high_segbits) {
|
||||
/*
|
||||
* The kernel currently implicitely assumes that the
|
||||
* MIPS SEGBITS parameter for the processor is
|
||||
* (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
|
||||
* allocate virtual addresses outside the maximum
|
||||
* range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
|
||||
* that doesn't prevent user code from accessing the
|
||||
* higher xuseg addresses. Here, we make sure that
|
||||
* everything but the lower xuseg addresses goes down
|
||||
* the module_alloc/vmalloc path.
|
||||
*/
|
||||
uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
|
||||
uasm_il_bnez(p, r, ptr, label_vmalloc);
|
||||
} else {
|
||||
uasm_il_bltz(p, r, tmp, label_vmalloc);
|
||||
}
|
||||
/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
|
||||
|
||||
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
|
||||
|
@ -549,14 +582,14 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
|
|||
* SMTC uses TCBind value as "CPU" index
|
||||
*/
|
||||
uasm_i_mfc0(p, ptr, C0_TCBIND);
|
||||
uasm_i_dsrl(p, ptr, ptr, 19);
|
||||
uasm_i_dsrl_safe(p, ptr, ptr, 19);
|
||||
# else
|
||||
/*
|
||||
* 64 bit SMP running in XKPHYS has smp_processor_id() << 3
|
||||
* stored in CONTEXT.
|
||||
*/
|
||||
uasm_i_dmfc0(p, ptr, C0_CONTEXT);
|
||||
uasm_i_dsrl(p, ptr, ptr, 23);
|
||||
uasm_i_dsrl_safe(p, ptr, ptr, 23);
|
||||
# endif
|
||||
UASM_i_LA_mostly(p, tmp, pgdc);
|
||||
uasm_i_daddu(p, ptr, ptr, tmp);
|
||||
|
@ -569,44 +602,78 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,

	uasm_l_vmalloc_done(l, *p);

-	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
-		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
-	else
-		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
+	/* get pgd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
 #ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
-	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
+	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }

+enum vmalloc64_mode {not_refill, refill};
 /*
  * BVADDR is the faulting address, PTR is scratch.
  * PTR will hold the pgd for vmalloc.
  */
 static void __cpuinit
 build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-			unsigned int bvaddr, unsigned int ptr)
+			unsigned int bvaddr, unsigned int ptr,
+			enum vmalloc64_mode mode)
 {
	long swpd = (long)swapper_pg_dir;
+	int single_insn_swpd;
+	int did_vmalloc_branch = 0;
+
+	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

-	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
-		uasm_il_b(p, r, label_vmalloc_done);
-		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
-	} else {
-		UASM_i_LA_mostly(p, ptr, swpd);
-		uasm_il_b(p, r, label_vmalloc_done);
-		if (uasm_in_compat_space_p(swpd))
-			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
-		else
-			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+	if (mode == refill && check_for_high_segbits) {
+		if (single_insn_swpd) {
+			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+			did_vmalloc_branch = 1;
+			/* fall through */
+		} else {
+			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
+		}
+	}
+	if (!did_vmalloc_branch) {
+		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+			uasm_il_b(p, r, label_vmalloc_done);
+			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
+		} else {
+			UASM_i_LA_mostly(p, ptr, swpd);
+			uasm_il_b(p, r, label_vmalloc_done);
+			if (uasm_in_compat_space_p(swpd))
+				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
+			else
+				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
+		}
+	}
+	if (mode == refill && check_for_high_segbits) {
+		uasm_l_large_segbits_fault(l, *p);
+		/*
+		 * We get here if we are an xsseg address, or if we are
+		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
+		 *
+		 * Ignoring xsseg (assumed disabled, so it would take an
+		 * address error anyway), the only remaining possibility
+		 * is the upper xuseg addresses.  On processors with
+		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these addresses
+		 * would have taken an address error.  We try to mimic
+		 * that here by taking a load/istream page fault.
+		 */
+		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
+		uasm_i_jr(p, ptr);
+		uasm_i_nop(p);
	}
 }

@ -720,9 +787,9 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
-		uasm_i_dsrl(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
+		uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-		uasm_i_dsrl(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
+		uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
	}
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 } else {

@ -793,9 +860,9 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
-		uasm_i_dsrl32(&p, K1, K0, 62 - 32);
-		uasm_i_dsrl(&p, K0, K0, 12 + 1);
-		uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */

@ -825,7 +892,7 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 #endif

 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
+	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
 #endif

	/*

@ -934,15 +1001,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
	dump_handler((u32 *)ebase, 64);
 }

-/*
- * TLB load/store/modify handlers.
- *
- * Only the fastpath gets synthesized at runtime, the slowpath for
- * do_page_fault remains normal asm.
- */
-extern void tlb_do_page_fault_0(void);
-extern void tlb_do_page_fault_1(void);
-
 /*
  * 128 instructions for the fastpath handler is generous and should
  * never be exceeded.

@ -1302,7 +1360,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
	uasm_i_eret(p); /* return from trap */

 #ifdef CONFIG_64BIT
-	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
+	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
 #endif
 }

@ -1322,9 +1380,9 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
-		uasm_i_dsrl32(&p, K1, K0, 62 - 32);
-		uasm_i_dsrl(&p, K0, K0, 12 + 1);
-		uasm_i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
+		uasm_i_dsrl_safe(&p, K1, K0, 62);
+		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
+		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */

@ -1526,6 +1584,10 @@ void __cpuinit build_tlb_refill_handler(void)
	 */
	static int run_once = 0;

+#ifdef CONFIG_64BIT
+	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+#endif
+
	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
@ -20,6 +20,8 @@
  * Reset the PNX8550 board.
  *
  */
+#include <linux/kernel.h>
+
 #include <asm/reboot.h>
 #include <glb.h>
@ -37,6 +37,7 @@
 #include <linux/mm.h>
+#include <linux/console.h>
 #include <linux/tty.h>
 #include <linux/vt.h>

 #include <asm/io.h>
@ -254,7 +255,7 @@ static int __init sb1250_pcibios_init(void)
	 * XXX ehs: Should this happen in PCI Device mode?
	 */
	io_map_base = ioremap(A_PHYS_LDTPCI_IO_MATCH_BYTES, 1024 * 1024);
-	sb1250_controller.io_map_base = io_map_base;
+	sb1250_controller.io_map_base = (unsigned long)io_map_base;
	set_io_port_base((unsigned long)io_map_base);

 #ifdef CONFIG_SIBYTE_HAS_LDT
@ -89,7 +89,7 @@ static void print_buserr(void)
 void ip22_be_interrupt(int irq)
 {
	const int field = 2 * sizeof(unsigned long);
-	const struct pt_regs *regs = get_irq_regs();
+	struct pt_regs *regs = get_irq_regs();

	save_and_clear_buserr();
	print_buserr();
@ -453,7 +453,7 @@ static int ip28_be_interrupt(const struct pt_regs *regs)

 void ip22_be_interrupt(int irq)
 {
-	const struct pt_regs *regs = get_irq_regs();
+	struct pt_regs *regs = get_irq_regs();

	count_be_interrupt++;
@ -145,15 +145,14 @@ void __init plat_mem_setup(void)

 #ifdef CONFIG_VT
	screen_info = (struct screen_info) {
-		0, 0,	/* orig-x, orig-y */
-		0,	/* unused */
-		52,	/* orig_video_page */
-		3,	/* orig_video_mode */
-		80,	/* orig_video_cols */
-		4626, 3, 9, /* unused, ega_bx, unused */
-		25,	/* orig_video_lines */
-		0x22,	/* orig_video_isVGA */
-		16	/* orig_video_points */
+		.orig_video_page	= 52,
+		.orig_video_mode	= 3,
+		.orig_video_cols	= 80,
+		.flags			= 12,
+		.orig_video_ega_bx	= 3,
+		.orig_video_lines	= 25,
+		.orig_video_isVGA	= 0x22,
+		.orig_video_points	= 16,
	};
	/* XXXKW for CFE, get lines/cols from environment */
 #endif
@ -35,6 +35,9 @@ struct cpu_hw_events {
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+
+	unsigned int group_flag;
+	int n_txn_start;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

@ -718,66 +721,6 @@ static int collect_events(struct perf_event *group, int max_count,
	return n;
 }

-static void event_sched_in(struct perf_event *event)
-{
-	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = smp_processor_id();
-	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-	if (is_software_event(event))
-		event->pmu->enable(event);
-}
-
-/*
- * Called to enable a whole group of events.
- * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
- * Assumes the caller has disabled interrupts and has
- * frozen the PMU with hw_perf_save_disable.
- */
-int hw_perf_group_sched_in(struct perf_event *group_leader,
-			   struct perf_cpu_context *cpuctx,
-			   struct perf_event_context *ctx)
-{
-	struct cpu_hw_events *cpuhw;
-	long i, n, n0;
-	struct perf_event *sub;
-
-	if (!ppmu)
-		return 0;
-	cpuhw = &__get_cpu_var(cpu_hw_events);
-	n0 = cpuhw->n_events;
-	n = collect_events(group_leader, ppmu->n_counter - n0,
-			   &cpuhw->event[n0], &cpuhw->events[n0],
-			   &cpuhw->flags[n0]);
-	if (n < 0)
-		return -EAGAIN;
-	if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
-		return -EAGAIN;
-	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
-	if (i < 0)
-		return -EAGAIN;
-	cpuhw->n_events = n0 + n;
-	cpuhw->n_added += n;
-
-	/*
-	 * OK, this group can go on; update event states etc.,
-	 * and enable any software events
-	 */
-	for (i = n0; i < n0 + n; ++i)
-		cpuhw->event[i]->hw.config = cpuhw->events[i];
-	cpuctx->active_oncpu += n;
-	n = 1;
-	event_sched_in(group_leader);
-	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
-		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub);
-			++n;
-		}
-	}
-	ctx->nr_active += n;
-
-	return 1;
-}
-
 /*
  * Add an event to the PMU.
  * If all events are not already frozen, then we disable and

@ -805,12 +748,22 @@ static int power_pmu_enable(struct perf_event *event)
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

+	/*
+	 * If a group-event scheduling transaction was started,
+	 * skip the schedulability test here; it will be performed
+	 * at commit time (->commit_txn) as a whole.
+	 */
+	if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+		goto nocheck;
+
	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];
+
+nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

@ -896,11 +849,65 @@ static void power_pmu_unthrottle(struct perf_event *event)
	local_irq_restore(flags);
 }

+/*
+ * Start a group-event scheduling transaction.
+ * Set the flag so that pmu::enable() does not perform the
+ * schedulability test; it will be performed at commit time.
+ */
+void power_pmu_start_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+	cpuhw->n_txn_start = cpuhw->n_events;
+}
+
+/*
+ * Stop a group-event scheduling transaction.
+ * Clear the flag; pmu::enable() will again perform the
+ * schedulability test.
+ */
+void power_pmu_cancel_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+	cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit a group-event scheduling transaction.
+ * Perform the group schedulability test as a whole.
+ * Returns 0 on success.
+ */
+int power_pmu_commit_txn(const struct pmu *pmu)
+{
+	struct cpu_hw_events *cpuhw;
+	long i, n;
+
+	if (!ppmu)
+		return -EAGAIN;
+	cpuhw = &__get_cpu_var(cpu_hw_events);
+	n = cpuhw->n_events;
+	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
+		return -EAGAIN;
+	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
+	if (i < 0)
+		return -EAGAIN;
+
+	for (i = cpuhw->n_txn_start; i < n; ++i)
+		cpuhw->event[i]->hw.config = cpuhw->events[i];
+
+	return 0;
+}
+
 struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
+	.start_txn	= power_pmu_start_txn,
+	.cancel_txn	= power_pmu_cancel_txn,
+	.commit_txn	= power_pmu_commit_txn,
 };

 /*
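The three ->*_txn hooks added above replace the removed hw_perf_group_sched_in(): the core now feeds a whole group through ->enable() and validates it once at commit. A minimal sketch of the calling pattern, assuming a caller that already holds the events of one group (the helper name and loop here are illustrative, not the perf core's actual code):

    /* Illustrative caller: schedule a whole group, validating it once. */
    static int sched_in_group_sketch(struct pmu *pmu, struct perf_event **evs, int n)
    {
            int i;

            pmu->start_txn(pmu);                    /* defer per-event checks */
            for (i = 0; i < n; i++) {
                    if (pmu->enable(evs[i])) {      /* adds event, skips check */
                            pmu->cancel_txn(pmu);
                            return -EAGAIN;
                    }
            }
            if (pmu->commit_txn(pmu)) {             /* one schedulability test */
                    pmu->cancel_txn(pmu);
                    return -EAGAIN;
            }
            return 0;
    }

The design point is cost: check_excludes()/power_check_constraints() run once per group instead of once per member event.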
@ -44,6 +44,7 @@ config SUPERH32
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_ARCH_KGDB
	select HAVE_HW_BREAKPOINT
+	select HAVE_MIXED_BREAKPOINTS_REGS
	select PERF_EVENTS if HAVE_HW_BREAKPOINT
	select ARCH_HIBERNATION_POSSIBLE if MMU
@ -46,10 +46,14 @@ struct pmu;
 /* Maximum number of UBC channels */
 #define HBP_NUM		2

+static inline int hw_breakpoint_slots(int type)
+{
+	return HBP_NUM;
+}
+
 /* arch/sh/kernel/hw_breakpoint.c */
-extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
-					 struct task_struct *tsk);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					    unsigned long val, void *data);
@ -119,26 +119,17 @@ static int get_hbp_len(u16 hbp_len)
	return len_in_bytes;
 }

-/*
- * Check for virtual address in user space.
- */
-int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
-{
-	unsigned int len;
-
-	len = get_hbp_len(hbp_len);
-
-	return (va <= TASK_SIZE - len);
-}
-
 /*
  * Check for virtual address in kernel space.
  */
-static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
+int arch_check_bp_in_kernelspace(struct perf_event *bp)
 {
	unsigned int len;
+	unsigned long va;
+	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

-	len = get_hbp_len(hbp_len);
+	va = info->address;
+	len = get_hbp_len(info->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
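The rewritten check takes the breakpoint itself and reports whether the whole [address, address+len) range lies in kernel space; the validation and signal-delivery paths below derive the user-space case as its negation. A toy model of the predicate, under the assumption of a flat TASK_SIZE split:

    /* Toy model: true only when the whole range sits at or above task_size.
     * A range straddling task_size fails both this test and the old
     * fully-in-userspace test (va <= TASK_SIZE - len); with the new code,
     * !in_kernelspace treats such a range as user space.
     */
    static int range_in_kernelspace(unsigned long va, unsigned int len,
                                    unsigned long task_size)
    {
            return va >= task_size && (va + len - 1) >= task_size;
    }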
@ -226,8 +217,7 @@ static int arch_build_bp_info(struct perf_event *bp)
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
-int arch_validate_hwbkpt_settings(struct perf_event *bp,
-				  struct task_struct *tsk)
+int arch_validate_hwbkpt_settings(struct perf_event *bp)
 {
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;

@ -270,15 +260,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
	if (info->address & align)
		return -EINVAL;

-	/* Check that the virtual address is in the proper range */
-	if (tsk) {
-		if (!arch_check_va_in_userspace(info->address, info->len))
-			return -EFAULT;
-	} else {
-		if (!arch_check_va_in_kernelspace(info->address, info->len))
-			return -EFAULT;
-	}
-
	return 0;
 }

@ -363,8 +344,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
-		if (arch_check_va_in_userspace(bp->attr.bp_addr,
-					       bp->attr.bp_len)) {
+		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
@ -85,7 +85,7 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)

	bp = thread->ptrace_bps[0];
	if (!bp) {
-		hw_breakpoint_init(&attr);
+		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_2;
@ -53,11 +53,15 @@ config X86
	select HAVE_KERNEL_LZMA
	select HAVE_KERNEL_LZO
	select HAVE_HW_BREAKPOINT
+	select HAVE_MIXED_BREAKPOINTS_REGS
	select PERF_EVENTS
	select ANON_INODES
	select HAVE_ARCH_KMEMCHECK
	select HAVE_USER_RETURN_NOTIFIER

+config INSTRUCTION_DECODER
+	def_bool (KPROBES || PERF_EVENTS)
+
 config OUTPUT_FORMAT
	string
	default "elf32-i386" if X86_32
@ -502,23 +502,3 @@ config CPU_SUP_UMC_32
	  CPU might render the kernel unbootable.

	  If unsure, say N.
-
-config X86_DS
-	def_bool X86_PTRACE_BTS
-	depends on X86_DEBUGCTLMSR
-	select HAVE_HW_BRANCH_TRACER
-
-config X86_PTRACE_BTS
-	bool "Branch Trace Store"
-	default y
-	depends on X86_DEBUGCTLMSR
-	depends on BROKEN
-	---help---
-	  This adds a ptrace interface to the hardware's branch trace store.
-
-	  Debuggers may use it to collect an execution trace of the debugged
-	  application in order to answer the question 'how did I get here?'.
-	  Debuggers may trace user mode as well as kernel mode.
-
-	  Say Y unless there is no application development on this machine
-	  and you want to save a small amount of code size.
@ -174,15 +174,6 @@ config IOMMU_LEAK
	  Add a simple leak tracer to the IOMMU code. This is useful when you
	  are debugging a buggy device driver that leaks IOMMU mappings.

-config X86_DS_SELFTEST
-	bool "DS selftest"
-	default y
-	depends on DEBUG_KERNEL
-	depends on X86_DS
-	---help---
-	  Perform Debug Store selftests at boot time.
-	  If in doubt, say "N".
-
 config HAVE_MMIOTRACE_SUPPORT
	def_bool y
@ -373,6 +373,7 @@
 extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
 #endif

+#ifdef CONFIG_X86_LOCAL_APIC
 static inline u32 apic_read(u32 reg)
 {
	return apic->read(reg);

@ -403,10 +404,19 @@ static inline u32 safe_apic_wait_icr_idle(void)
	return apic->safe_wait_icr_idle();
 }

+#else /* CONFIG_X86_LOCAL_APIC */
+
+static inline u32 apic_read(u32 reg) { return 0; }
+static inline void apic_write(u32 reg, u32 val) { }
+static inline u64 apic_icr_read(void) { return 0; }
+static inline void apic_icr_write(u32 low, u32 high) { }
+static inline void apic_wait_icr_idle(void) { }
+static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
 static inline void ack_APIC_irq(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * ack_APIC_irq() actually gets compiled as a single instruction
	 * ... yummie.

@ -414,7 +424,6 @@ static inline void ack_APIC_irq(void)

	/* Docs say use 0 for future compatibility */
	apic_write(APIC_EOI, 0);
-#endif
 }

 static inline unsigned default_get_apic_id(unsigned long x)
@ -1,302 +0,0 @@
-/*
- * Debug Store (DS) support
- *
- * This provides a low-level interface to the hardware's Debug Store
- * feature that is used for branch trace store (BTS) and
- * precise-event based sampling (PEBS).
- *
- * It manages:
- * - DS and BTS hardware configuration
- * - buffer overflow handling (to be done)
- * - buffer access
- *
- * It does not do:
- * - security checking (is the caller allowed to trace the task)
- * - buffer allocation (memory accounting)
- *
- *
- * Copyright (C) 2007-2009 Intel Corporation.
- * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
- */
-
-#ifndef _ASM_X86_DS_H
-#define _ASM_X86_DS_H
-
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/err.h>
-
-
-#ifdef CONFIG_X86_DS
-
-struct task_struct;
-struct ds_context;
-struct ds_tracer;
-struct bts_tracer;
-struct pebs_tracer;
-
-typedef void (*bts_ovfl_callback_t)(struct bts_tracer *);
-typedef void (*pebs_ovfl_callback_t)(struct pebs_tracer *);
-
-
-/*
- * A list of features plus corresponding macros to talk about them in
- * the ds_request function's flags parameter.
- *
- * We use the enum to index an array of corresponding control bits;
- * we use the macro to index a flags bit-vector.
- */
-enum ds_feature {
-	dsf_bts = 0,
-	dsf_bts_kernel,
-#define BTS_KERNEL (1 << dsf_bts_kernel)
-	/* trace kernel-mode branches */
-
-	dsf_bts_user,
-#define BTS_USER (1 << dsf_bts_user)
-	/* trace user-mode branches */
-
-	dsf_bts_overflow,
-	dsf_bts_max,
-	dsf_pebs = dsf_bts_max,
-
-	dsf_pebs_max,
-	dsf_ctl_max = dsf_pebs_max,
-	dsf_bts_timestamps = dsf_ctl_max,
-#define BTS_TIMESTAMPS (1 << dsf_bts_timestamps)
-	/* add timestamps into BTS trace */
-
-#define BTS_USER_FLAGS (BTS_KERNEL | BTS_USER | BTS_TIMESTAMPS)
-};
-
-
-/*
- * Request BTS or PEBS
- *
- * Due to alignement constraints, the actual buffer may be slightly
- * smaller than the requested or provided buffer.
- *
- * Returns a pointer to a tracer structure on success, or
- * ERR_PTR(errcode) on failure.
- *
- * The interrupt threshold is independent from the overflow callback
- * to allow users to use their own overflow interrupt handling mechanism.
- *
- * The function might sleep.
- *
- * task: the task to request recording for
- * cpu:  the cpu to request recording for
- * base: the base pointer for the (non-pageable) buffer;
- * size: the size of the provided buffer in bytes
- * ovfl: pointer to a function to be called on buffer overflow;
- *       NULL if cyclic buffer requested
- * th: the interrupt threshold in records from the end of the buffer;
- *     -1 if no interrupt threshold is requested.
- * flags: a bit-mask of the above flags
- */
-extern struct bts_tracer *ds_request_bts_task(struct task_struct *task,
-					      void *base, size_t size,
-					      bts_ovfl_callback_t ovfl,
-					      size_t th, unsigned int flags);
-extern struct bts_tracer *ds_request_bts_cpu(int cpu, void *base, size_t size,
-					     bts_ovfl_callback_t ovfl,
-					     size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_task(struct task_struct *task,
-						void *base, size_t size,
-						pebs_ovfl_callback_t ovfl,
-						size_t th, unsigned int flags);
-extern struct pebs_tracer *ds_request_pebs_cpu(int cpu,
-					       void *base, size_t size,
-					       pebs_ovfl_callback_t ovfl,
-					       size_t th, unsigned int flags);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Must be called with irq's enabled.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern void ds_release_bts(struct bts_tracer *tracer);
-extern void ds_suspend_bts(struct bts_tracer *tracer);
-extern void ds_resume_bts(struct bts_tracer *tracer);
-extern void ds_release_pebs(struct pebs_tracer *tracer);
-extern void ds_suspend_pebs(struct pebs_tracer *tracer);
-extern void ds_resume_pebs(struct pebs_tracer *tracer);
-
-/*
- * Release BTS or PEBS resources
- * Suspend and resume BTS or PEBS tracing
- *
- * Cpu tracers must call this on the traced cpu.
- * Task tracers must call ds_release_~_noirq() for themselves.
- *
- * May be called with irq's disabled.
- *
- * Returns 0 if successful;
- * -EPERM if the cpu tracer does not trace the current cpu.
- * -EPERM if the task tracer does not trace itself.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_release_bts_noirq(struct bts_tracer *tracer);
-extern int ds_suspend_bts_noirq(struct bts_tracer *tracer);
-extern int ds_resume_bts_noirq(struct bts_tracer *tracer);
-extern int ds_release_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_suspend_pebs_noirq(struct pebs_tracer *tracer);
-extern int ds_resume_pebs_noirq(struct pebs_tracer *tracer);
-
-
-/*
- * The raw DS buffer state as it is used for BTS and PEBS recording.
- *
- * This is the low-level, arch-dependent interface for working
- * directly on the raw trace data.
- */
-struct ds_trace {
-	/* the number of bts/pebs records */
-	size_t n;
-	/* the size of a bts/pebs record in bytes */
-	size_t size;
-	/* pointers into the raw buffer:
-	   - to the first entry */
-	void *begin;
-	/* - one beyond the last entry */
-	void *end;
-	/* - one beyond the newest entry */
-	void *top;
-	/* - the interrupt threshold */
-	void *ith;
-	/* flags given on ds_request() */
-	unsigned int flags;
-};
-
-/*
- * An arch-independent view on branch trace data.
- */
-enum bts_qualifier {
-	bts_invalid,
-#define BTS_INVALID bts_invalid
-
-	bts_branch,
-#define BTS_BRANCH bts_branch
-
-	bts_task_arrives,
-#define BTS_TASK_ARRIVES bts_task_arrives
-
-	bts_task_departs,
-#define BTS_TASK_DEPARTS bts_task_departs
-
-	bts_qual_bit_size = 4,
-	bts_qual_max = (1 << bts_qual_bit_size),
-};
-
-struct bts_struct {
-	__u64 qualifier;
-	union {
-		/* BTS_BRANCH */
-		struct {
-			__u64 from;
-			__u64 to;
-		} lbr;
-		/* BTS_TASK_ARRIVES or BTS_TASK_DEPARTS */
-		struct {
-			__u64 clock;
-			pid_t pid;
-		} event;
-	} variant;
-};
-
-
-/*
- * The BTS state.
- *
- * This gives access to the raw DS state and adds functions to provide
- * an arch-independent view of the BTS data.
- */
-struct bts_trace {
-	struct ds_trace ds;
-
-	int (*read)(struct bts_tracer *tracer, const void *at,
-		    struct bts_struct *out);
-	int (*write)(struct bts_tracer *tracer, const struct bts_struct *in);
-};
-
-
-/*
- * The PEBS state.
- *
- * This gives access to the raw DS state and the PEBS-specific counter
- * reset value.
- */
-struct pebs_trace {
-	struct ds_trace ds;
-
-	/* the number of valid counters in the below array */
-	unsigned int counters;
-
-#define MAX_PEBS_COUNTERS 4
-	/* the counter reset value */
-	unsigned long long counter_reset[MAX_PEBS_COUNTERS];
-};
-
-
-/*
- * Read the BTS or PEBS trace.
- *
- * Returns a view on the trace collected for the parameter tracer.
- *
- * The view remains valid as long as the traced task is not running or
- * the tracer is suspended.
- * Writes into the trace buffer are not reflected.
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern const struct bts_trace *ds_read_bts(struct bts_tracer *tracer);
-extern const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer);
-
-
-/*
- * Reset the write pointer of the BTS/PEBS buffer.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_~()
- */
-extern int ds_reset_bts(struct bts_tracer *tracer);
-extern int ds_reset_pebs(struct pebs_tracer *tracer);
-
-/*
- * Set the PEBS counter reset value.
- *
- * Returns 0 on success; -Eerrno on error
- *
- * tracer: the tracer handle returned from ds_request_pebs()
- * counter: the index of the counter
- * value: the new counter reset value
- */
-extern int ds_set_pebs_reset(struct pebs_tracer *tracer,
-			     unsigned int counter, u64 value);
-
-/*
- * Initialization
- */
-struct cpuinfo_x86;
-extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
-
-/*
- * Context switch work
- */
-extern void ds_switch_to(struct task_struct *prev, struct task_struct *next);
-
-#else /* CONFIG_X86_DS */
-
-struct cpuinfo_x86;
-static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
-static inline void ds_switch_to(struct task_struct *prev,
-				struct task_struct *next) {}
-
-#endif /* CONFIG_X86_DS */
-#endif /* _ASM_X86_DS_H */
@ -41,12 +41,16 @@ struct arch_hw_breakpoint {
 /* Total number of available HW breakpoint registers */
 #define HBP_NUM 4

+static inline int hw_breakpoint_slots(int type)
+{
+	return HBP_NUM;
+}
+
 struct perf_event;
 struct pmu;

-extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len);
-extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
-					 struct task_struct *tsk);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
 extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					    unsigned long val, void *data);
@ -68,6 +68,8 @@ struct insn {
	const insn_byte_t *next_byte;
 };

+#define MAX_INSN_SIZE	16
+
 #define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
 #define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
 #define X86_MODRM_RM(modrm)  ((modrm) & 0x07)
@ -347,6 +347,7 @@ extern void __iomem *early_ioremap(resource_size_t phys_addr,
 extern void __iomem *early_memremap(resource_size_t phys_addr,
				     unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void fixup_early_ioremap(void);

 #define IO_SPACE_LIMIT 0xffff
@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
+#include <asm/insn.h>

 #define  __ARCH_WANT_KPROBES_INSN_SLOT

@ -36,7 +37,6 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVEJUMP_SIZE 5
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
-#define MAX_INSN_SIZE 16
 #define MAX_STACK_SIZE 64
 #define MIN_STACK_SIZE(ADDR) \
	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
@ -71,11 +71,14 @@
 #define MSR_IA32_LASTINTTOIP		0x000001de

 /* DEBUGCTLMSR bits (others vary by model): */
-#define _DEBUGCTLMSR_LBR	0 /* last branch recording */
-#define _DEBUGCTLMSR_BTF	1 /* single-step on branches */
-
-#define DEBUGCTLMSR_LBR		(1UL << _DEBUGCTLMSR_LBR)
-#define DEBUGCTLMSR_BTF		(1UL << _DEBUGCTLMSR_BTF)
+#define DEBUGCTLMSR_LBR			(1UL <<  0) /* last branch recording */
+#define DEBUGCTLMSR_BTF			(1UL <<  1) /* single-step on branches */
+#define DEBUGCTLMSR_TR			(1UL <<  6)
+#define DEBUGCTLMSR_BTS			(1UL <<  7)
+#define DEBUGCTLMSR_BTINT		(1UL <<  8)
+#define DEBUGCTLMSR_BTS_OFF_OS		(1UL <<  9)
+#define DEBUGCTLMSR_BTS_OFF_USR		(1UL << 10)
+#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI	(1UL << 11)

 #define MSR_IA32_MC0_CTL		0x00000400
 #define MSR_IA32_MC0_STATUS		0x00000401
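These defines are plain bit masks over the IA32_DEBUGCTL MSR. A hedged sketch of how one such bit, BTF (single-step on branches), would be set using the kernel's rdmsrl/wrmsrl accessors from <asm/msr.h> (the function name here is illustrative):

    /* Sketch: enable single-step-on-branches in IA32_DEBUGCTL.
     * Assumes MSR_IA32_DEBUGCTLMSR and rdmsrl/wrmsrl are available. */
    static void set_btf_sketch(void)
    {
            unsigned long long debugctl;

            rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
            debugctl |= DEBUGCTLMSR_BTF;
            wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
    }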
@ -359,6 +362,8 @@
 #define MSR_P4_U2L_ESCR0		0x000003b0
 #define MSR_P4_U2L_ESCR1		0x000003b1

+#define MSR_P4_PEBS_MATRIX_VERT		0x000003f2
+
 /* Intel Core-based CPU performance counters */
 #define MSR_CORE_PERF_FIXED_CTR0	0x00000309
 #define MSR_CORE_PERF_FIXED_CTR1	0x0000030a
@ -5,7 +5,7 @@
  * Performance event hw details:
  */

-#define X86_PMC_MAX_GENERIC	 8
+#define X86_PMC_MAX_GENERIC	32
 #define X86_PMC_MAX_FIXED	 3

 #define X86_PMC_IDX_GENERIC	 0
@ -18,39 +18,31 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0	0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1	0x187

-#define ARCH_PERFMON_EVENTSEL_ENABLE	(1 << 22)
-#define ARCH_PERFMON_EVENTSEL_ANY	(1 << 21)
-#define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR	(1 << 16)
-
-/*
- * Includes eventsel and unit mask as well:
- */
-
-#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
-#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
-#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
-#define INTEL_ARCH_INV_MASK		0x00800000ULL
-#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
-#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)
-
-/*
- * filter mask to validate fixed counter events.
- * the following filters disqualify for fixed counters:
- *  - inv
- *  - edge
- *  - cnt-mask
- *  The other filters are supported by fixed counters.
- *  The any-thread option is supported starting with v3.
- */
-#define INTEL_ARCH_FIXED_MASK \
-	(INTEL_ARCH_CNT_MASK| \
-	 INTEL_ARCH_INV_MASK| \
-	 INTEL_ARCH_EDGE_MASK|\
-	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVENT_MASK)
+#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL
+#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL
+#define ARCH_PERFMON_EVENTSEL_USR	(1ULL << 16)
+#define ARCH_PERFMON_EVENTSEL_OS	(1ULL << 17)
+#define ARCH_PERFMON_EVENTSEL_EDGE	(1ULL << 18)
+#define ARCH_PERFMON_EVENTSEL_INT	(1ULL << 20)
+#define ARCH_PERFMON_EVENTSEL_ANY	(1ULL << 21)
+#define ARCH_PERFMON_EVENTSEL_ENABLE	(1ULL << 22)
+#define ARCH_PERFMON_EVENTSEL_INV	(1ULL << 23)
+#define ARCH_PERFMON_EVENTSEL_CMASK	0xFF000000ULL
+
+#define AMD64_EVENTSEL_EVENT \
+	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+#define INTEL_ARCH_EVENT_MASK \
+	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+
+#define X86_RAW_EVENT_MASK \
+	(ARCH_PERFMON_EVENTSEL_EVENT | \
+	 ARCH_PERFMON_EVENTSEL_UMASK | \
+	 ARCH_PERFMON_EVENTSEL_EDGE  | \
+	 ARCH_PERFMON_EVENTSEL_INV   | \
+	 ARCH_PERFMON_EVENTSEL_CMASK)
+#define AMD64_RAW_EVENT_MASK \
+	(X86_RAW_EVENT_MASK | \
+	 AMD64_EVENTSEL_EVENT)

 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
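X86_RAW_EVENT_MASK collects exactly the EVENTSEL fields that a user-supplied raw config is allowed to set. A sketch of composing and sanitizing one, e.g. unhalted core cycles counted in both user and kernel mode, using only the defines from this hunk (the helper name is illustrative):

    /* Sketch: build an EVENTSEL value and strip anything outside the raw mask. */
    static unsigned long long cycles_raw_config(void)
    {
            unsigned long long config =
                    ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL |   /* event 0x3c */
                    ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | /* umask 0x00 */
                    ARCH_PERFMON_EVENTSEL_USR |
                    ARCH_PERFMON_EVENTSEL_OS;

            /* ENABLE, INT and friends stay kernel-controlled */
            return config & X86_RAW_EVENT_MASK;
    }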
@ -67,7 +59,7 @@
 union cpuid10_eax {
	struct {
		unsigned int version_id:8;
-		unsigned int num_events:8;
+		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;

@ -76,7 +68,7 @@ union cpuid10_eax {

 union cpuid10_edx {
	struct {
-		unsigned int num_events_fixed:4;
+		unsigned int num_counters_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
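The renamed bitfields (num_events → num_counters) are read straight out of CPUID leaf 0xA. A sketch, assuming the kernel's cpuid() helper from <asm/processor.h>:

    /* Sketch: query architectural perfmon parameters via CPUID leaf 0xA. */
    union cpuid10_eax eax;
    union cpuid10_edx edx;
    unsigned int ebx, ecx;

    cpuid(0xa, &eax.full, &ebx, &ecx, &edx.full);
    /* eax.split.num_counters:       general-purpose counters per core */
    /* eax.split.bit_width:          counter width in bits             */
    /* edx.split.num_counters_fixed: fixed-function counters (v2+)     */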
@ -136,6 +128,18 @@ extern void perf_events_lapic_init(void);

 #define PERF_EVENT_INDEX_OFFSET		0

+/*
+ * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
+ * This flag is otherwise unused and ABI specified to be 0, so nobody should
+ * care what we do with it.
+ */
+#define PERF_EFLAGS_EXACT	(1UL << 3)
+
+struct pt_regs;
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
+#define perf_misc_flags(regs)	perf_misc_flags(regs)
+
 #else
 static inline void init_hw_perf_events(void)		{ }
 static inline void perf_events_lapic_init(void)	{ }
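PERF_EFLAGS_EXACT is smuggled into the flags of a sampled pt_regs so consumers can tell a PEBS-corrected (precise) IP from an ordinary, possibly skidded one. A sketch of a consumer-side test (illustrative only; the in-kernel consumer is the perf_misc_flags() path declared above):

    /* Sketch: was this sample's IP fixed up by PEBS (exact), or did it skid? */
    static int sample_ip_is_exact(struct pt_regs *regs)
    {
            return !!(regs->flags & PERF_EFLAGS_EXACT);
    }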
arch/x86/include/asm/perf_event_p4.h (new file, 794 lines)

@ -0,0 +1,794 @@
+/*
+ * Netburst Performance Events (P4, old Xeon)
+ */
+
+#ifndef PERF_EVENT_P4_H
+#define PERF_EVENT_P4_H
+
+#include <linux/cpu.h>
+#include <linux/bitops.h>
+
+/*
+ * NetBurst has performance MSRs shared between
+ * threads if HT is turned on, ie for both logical
+ * processors (note: on Atom with HT support,
+ * perf-MSRs are not shared and every thread has its
+ * own perf-MSRs set)
+ */
+#define ARCH_P4_TOTAL_ESCR	(46)
+#define ARCH_P4_RESERVED_ESCR	(2) /* IQ_ESCR(0,1) not always present */
+#define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
+#define ARCH_P4_MAX_CCCR	(18)
+#define ARCH_P4_MAX_COUNTER	(ARCH_P4_MAX_CCCR / 2)
+
+#define P4_ESCR_EVENT_MASK	0x7e000000U
+#define P4_ESCR_EVENT_SHIFT	25
+#define P4_ESCR_EVENTMASK_MASK	0x01fffe00U
+#define P4_ESCR_EVENTMASK_SHIFT	9
+#define P4_ESCR_TAG_MASK	0x000001e0U
+#define P4_ESCR_TAG_SHIFT	5
+#define P4_ESCR_TAG_ENABLE	0x00000010U
+#define P4_ESCR_T0_OS		0x00000008U
+#define P4_ESCR_T0_USR		0x00000004U
+#define P4_ESCR_T1_OS		0x00000002U
+#define P4_ESCR_T1_USR		0x00000001U
+
+#define P4_ESCR_EVENT(v)	((v) << P4_ESCR_EVENT_SHIFT)
+#define P4_ESCR_EMASK(v)	((v) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_TAG(v)		((v) << P4_ESCR_TAG_SHIFT)
+
+/* Non HT mask */
+#define P4_ESCR_MASK \
+	(P4_ESCR_EVENT_MASK | \
+	 P4_ESCR_EVENTMASK_MASK | \
+	 P4_ESCR_TAG_MASK | \
+	 P4_ESCR_TAG_ENABLE | \
+	 P4_ESCR_T0_OS | \
+	 P4_ESCR_T0_USR)
+
+/* HT mask */
+#define P4_ESCR_MASK_HT \
+	(P4_ESCR_MASK | P4_ESCR_T1_OS | P4_ESCR_T1_USR)
+
+#define P4_CCCR_OVF			0x80000000U
+#define P4_CCCR_CASCADE			0x40000000U
+#define P4_CCCR_OVF_PMI_T0		0x04000000U
+#define P4_CCCR_OVF_PMI_T1		0x08000000U
+#define P4_CCCR_FORCE_OVF		0x02000000U
+#define P4_CCCR_EDGE			0x01000000U
+#define P4_CCCR_THRESHOLD_MASK		0x00f00000U
+#define P4_CCCR_THRESHOLD_SHIFT		20
+#define P4_CCCR_COMPLEMENT		0x00080000U
+#define P4_CCCR_COMPARE			0x00040000U
+#define P4_CCCR_ESCR_SELECT_MASK	0x0000e000U
+#define P4_CCCR_ESCR_SELECT_SHIFT	13
+#define P4_CCCR_ENABLE			0x00001000U
+#define P4_CCCR_THREAD_SINGLE		0x00010000U
+#define P4_CCCR_THREAD_BOTH		0x00020000U
+#define P4_CCCR_THREAD_ANY		0x00030000U
+#define P4_CCCR_RESERVED		0x00000fffU
+
+#define P4_CCCR_THRESHOLD(v)		((v) << P4_CCCR_THRESHOLD_SHIFT)
+#define P4_CCCR_ESEL(v)			((v) << P4_CCCR_ESCR_SELECT_SHIFT)
+
+/* Custom bits in reserved CCCR area */
+#define P4_CCCR_CACHE_OPS_MASK		0x0000003fU
+
+
+/* Non HT mask */
+#define P4_CCCR_MASK \
+	(P4_CCCR_OVF | \
+	 P4_CCCR_CASCADE | \
+	 P4_CCCR_OVF_PMI_T0 | \
+	 P4_CCCR_FORCE_OVF | \
+	 P4_CCCR_EDGE | \
+	 P4_CCCR_THRESHOLD_MASK | \
+	 P4_CCCR_COMPLEMENT | \
+	 P4_CCCR_COMPARE | \
+	 P4_CCCR_ESCR_SELECT_MASK | \
+	 P4_CCCR_ENABLE)
+
+/* HT mask */
+#define P4_CCCR_MASK_HT	(P4_CCCR_MASK | P4_CCCR_THREAD_ANY)
+
+#define P4_GEN_ESCR_EMASK(class, name, bit) \
+	class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
+#define P4_ESCR_EMASK_BIT(class, name)	class##__##name
+
+/*
+ * The config field is 64 bits wide and consists of
+ * HT << 63 | ESCR << 32 | CCCR
+ * where HT is the HyperThreading bit (since ESCR
+ * has it reserved we may use it for our own purpose).
+ *
+ * Note that these are NOT the addresses of the respective
+ * ESCR and CCCR, but only a packed value, which must be
+ * unpacked and written to the proper addresses.
+ *
+ * The basic idea is to pack as much info as possible.
+ */
+#define p4_config_pack_escr(v)		(((u64)(v)) << 32)
+#define p4_config_pack_cccr(v)		(((u64)(v)) & 0xffffffffULL)
+#define p4_config_unpack_escr(v)	(((u64)(v)) >> 32)
+#define p4_config_unpack_cccr(v)	(((u64)(v)) & 0xffffffffULL)
+
+#define p4_config_unpack_emask(v) \
+	({ \
+		u32 t = p4_config_unpack_escr((v)); \
+		t = t & P4_ESCR_EVENTMASK_MASK; \
+		t = t >> P4_ESCR_EVENTMASK_SHIFT; \
+		t; \
+	})
+
+#define p4_config_unpack_event(v) \
+	({ \
+		u32 t = p4_config_unpack_escr((v)); \
+		t = t & P4_ESCR_EVENT_MASK; \
+		t = t >> P4_ESCR_EVENT_SHIFT; \
+		t; \
+	})
+
+#define p4_config_unpack_cache_event(v)	(((u64)(v)) & P4_CCCR_CACHE_OPS_MASK)
+
+#define P4_CONFIG_HT_SHIFT		63
+#define P4_CONFIG_HT			(1ULL << P4_CONFIG_HT_SHIFT)
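The pack/unpack macros above treat the 64-bit event config as ESCR << 32 | CCCR, with the spare HT bit on top. A round-trip sketch using only the macros defined in this file (the function name is illustrative; event 0x0c with ESCR select 0x07 matches BSQ_cache_reference in the opcode table further down):

    /* Sketch: pack an ESCR/CCCR pair into one config word and pull it apart. */
    static void p4_config_roundtrip_sketch(void)
    {
            u64 config = p4_config_pack_escr(P4_ESCR_EVENT(0x0c) |
                                             P4_ESCR_T0_OS | P4_ESCR_T0_USR) |
                         p4_config_pack_cccr(P4_CCCR_ENABLE | P4_CCCR_ESEL(0x07));

            u32 escr = p4_config_unpack_escr(config);  /* high 32 bits */
            u32 cccr = p4_config_unpack_cccr(config);  /* low 32 bits  */
            u32 ev   = p4_config_unpack_event(config); /* 0x0c again   */

            (void)escr; (void)cccr; (void)ev;
    }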
+static inline bool p4_is_event_cascaded(u64 config)
+{
+	u32 cccr = p4_config_unpack_cccr(config);
+	return !!(cccr & P4_CCCR_CASCADE);
+}
+
+static inline int p4_ht_config_thread(u64 config)
+{
+	return !!(config & P4_CONFIG_HT);
+}
+
+static inline u64 p4_set_ht_bit(u64 config)
+{
+	return config | P4_CONFIG_HT;
+}
+
+static inline u64 p4_clear_ht_bit(u64 config)
+{
+	return config & ~P4_CONFIG_HT;
+}
+
+static inline int p4_ht_active(void)
+{
+#ifdef CONFIG_SMP
+	return smp_num_siblings > 1;
+#endif
+	return 0;
+}
+
+static inline int p4_ht_thread(int cpu)
+{
+#ifdef CONFIG_SMP
+	if (smp_num_siblings == 2)
+		return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+#endif
+	return 0;
+}
+
+static inline int p4_should_swap_ts(u64 config, int cpu)
+{
+	return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
+}
+
+static inline u32 p4_default_cccr_conf(int cpu)
+{
+	/*
+	 * Note that P4_CCCR_THREAD_ANY is "required" on
+	 * non-HT machines (on HT machines we count TS events
+	 * regardless of the state of the second logical processor).
+	 */
+	u32 cccr = P4_CCCR_THREAD_ANY;
+
+	if (!p4_ht_thread(cpu))
+		cccr |= P4_CCCR_OVF_PMI_T0;
+	else
+		cccr |= P4_CCCR_OVF_PMI_T1;
+
+	return cccr;
+}
+
+static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
+{
+	u32 escr = 0;
+
+	if (!p4_ht_thread(cpu)) {
+		if (!exclude_os)
+			escr |= P4_ESCR_T0_OS;
+		if (!exclude_usr)
+			escr |= P4_ESCR_T0_USR;
+	} else {
+		if (!exclude_os)
+			escr |= P4_ESCR_T1_OS;
+		if (!exclude_usr)
+			escr |= P4_ESCR_T1_USR;
+	}
+
+	return escr;
+}
+
+enum P4_EVENTS {
+	P4_EVENT_TC_DELIVER_MODE,
+	P4_EVENT_BPU_FETCH_REQUEST,
+	P4_EVENT_ITLB_REFERENCE,
+	P4_EVENT_MEMORY_CANCEL,
+	P4_EVENT_MEMORY_COMPLETE,
+	P4_EVENT_LOAD_PORT_REPLAY,
+	P4_EVENT_STORE_PORT_REPLAY,
+	P4_EVENT_MOB_LOAD_REPLAY,
+	P4_EVENT_PAGE_WALK_TYPE,
+	P4_EVENT_BSQ_CACHE_REFERENCE,
+	P4_EVENT_IOQ_ALLOCATION,
+	P4_EVENT_IOQ_ACTIVE_ENTRIES,
+	P4_EVENT_FSB_DATA_ACTIVITY,
+	P4_EVENT_BSQ_ALLOCATION,
+	P4_EVENT_BSQ_ACTIVE_ENTRIES,
+	P4_EVENT_SSE_INPUT_ASSIST,
+	P4_EVENT_PACKED_SP_UOP,
+	P4_EVENT_PACKED_DP_UOP,
+	P4_EVENT_SCALAR_SP_UOP,
+	P4_EVENT_SCALAR_DP_UOP,
+	P4_EVENT_64BIT_MMX_UOP,
+	P4_EVENT_128BIT_MMX_UOP,
+	P4_EVENT_X87_FP_UOP,
+	P4_EVENT_TC_MISC,
+	P4_EVENT_GLOBAL_POWER_EVENTS,
+	P4_EVENT_TC_MS_XFER,
+	P4_EVENT_UOP_QUEUE_WRITES,
+	P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE,
+	P4_EVENT_RETIRED_BRANCH_TYPE,
+	P4_EVENT_RESOURCE_STALL,
+	P4_EVENT_WC_BUFFER,
+	P4_EVENT_B2B_CYCLES,
+	P4_EVENT_BNR,
+	P4_EVENT_SNOOP,
+	P4_EVENT_RESPONSE,
+	P4_EVENT_FRONT_END_EVENT,
+	P4_EVENT_EXECUTION_EVENT,
+	P4_EVENT_REPLAY_EVENT,
+	P4_EVENT_INSTR_RETIRED,
+	P4_EVENT_UOPS_RETIRED,
+	P4_EVENT_UOP_TYPE,
+	P4_EVENT_BRANCH_RETIRED,
+	P4_EVENT_MISPRED_BRANCH_RETIRED,
+	P4_EVENT_X87_ASSIST,
+	P4_EVENT_MACHINE_CLEAR,
+	P4_EVENT_INSTR_COMPLETED,
+};
+
+#define P4_OPCODE(event)		event##_OPCODE
+#define P4_OPCODE_ESEL(opcode)		((opcode & 0x00ff) >> 0)
+#define P4_OPCODE_EVNT(opcode)		((opcode & 0xff00) >> 8)
+#define P4_OPCODE_PACK(event, sel)	(((event) << 8) | sel)
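Each opcode packs the ESCR event code with the CCCR's ESCR-select value; the two accessors undo it. For example, from the enum below, P4_EVENT_BSQ_CACHE_REFERENCE is P4_OPCODE_PACK(0x0c, 0x07), so (illustrative sketch):

    /* Sketch: split a packed opcode back into event code and ESCR select. */
    static void p4_opcode_sketch(void)
    {
            unsigned int opcode = P4_OPCODE_PACK(0x0c, 0x07); /* BSQ_CACHE_REFERENCE */
            unsigned int event  = P4_OPCODE_EVNT(opcode);     /* 0x0c */
            unsigned int esel   = P4_OPCODE_ESEL(opcode);     /* 0x07 */

            (void)event; (void)esel;
    }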
+/*
+ * The comments below each event give the ESCR restriction
+ * for this event and the counter index per ESCR.
+ *
+ * MSR_P4_IQ_ESCR0 and MSR_P4_IQ_ESCR1 are available only on early
+ * processor builds (family 0FH, models 01H-02H). These MSRs
+ * are not available on later versions, so we don't use
+ * them at all.
+ *
+ * Also note that CCCR1 does not have the P4_CCCR_ENABLE bit working
+ * properly, so we should not use this CCCR or its counter
+ * as a result.
+ */
+enum P4_EVENT_OPCODES {
+	P4_OPCODE(P4_EVENT_TC_DELIVER_MODE)		= P4_OPCODE_PACK(0x01, 0x01),
+	/*
+	 * MSR_P4_TC_ESCR0:	4, 5
+	 * MSR_P4_TC_ESCR1:	6, 7
+	 */
+
+	P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST)		= P4_OPCODE_PACK(0x03, 0x00),
+	/*
+	 * MSR_P4_BPU_ESCR0:	0, 1
+	 * MSR_P4_BPU_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_ITLB_REFERENCE)		= P4_OPCODE_PACK(0x18, 0x03),
+	/*
+	 * MSR_P4_ITLB_ESCR0:	0, 1
+	 * MSR_P4_ITLB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_MEMORY_CANCEL)		= P4_OPCODE_PACK(0x02, 0x05),
+	/*
+	 * MSR_P4_DAC_ESCR0:	8, 9
+	 * MSR_P4_DAC_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_MEMORY_COMPLETE)		= P4_OPCODE_PACK(0x08, 0x02),
+	/*
+	 * MSR_P4_SAAT_ESCR0:	8, 9
+	 * MSR_P4_SAAT_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY)		= P4_OPCODE_PACK(0x04, 0x02),
+	/*
+	 * MSR_P4_SAAT_ESCR0:	8, 9
+	 * MSR_P4_SAAT_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY)		= P4_OPCODE_PACK(0x05, 0x02),
+	/*
+	 * MSR_P4_SAAT_ESCR0:	8, 9
+	 * MSR_P4_SAAT_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY)		= P4_OPCODE_PACK(0x03, 0x02),
+	/*
+	 * MSR_P4_MOB_ESCR0:	0, 1
+	 * MSR_P4_MOB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE)		= P4_OPCODE_PACK(0x01, 0x04),
+	/*
+	 * MSR_P4_PMH_ESCR0:	0, 1
+	 * MSR_P4_PMH_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE)		= P4_OPCODE_PACK(0x0c, 0x07),
+	/*
+	 * MSR_P4_BSU_ESCR0:	0, 1
+	 * MSR_P4_BSU_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_IOQ_ALLOCATION)		= P4_OPCODE_PACK(0x03, 0x06),
+	/*
+	 * MSR_P4_FSB_ESCR0:	0, 1
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES)		= P4_OPCODE_PACK(0x1a, 0x06),
+	/*
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY)		= P4_OPCODE_PACK(0x17, 0x06),
+	/*
+	 * MSR_P4_FSB_ESCR0:	0, 1
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_BSQ_ALLOCATION)		= P4_OPCODE_PACK(0x05, 0x07),
+	/*
+	 * MSR_P4_BSU_ESCR0:	0, 1
+	 */
+
+	P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES)		= P4_OPCODE_PACK(0x06, 0x07),
+	/*
+	 * NOTE: no ESCR name in docs, it's guessed
+	 * MSR_P4_BSU_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST)		= P4_OPCODE_PACK(0x34, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_PACKED_SP_UOP)		= P4_OPCODE_PACK(0x08, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_PACKED_DP_UOP)		= P4_OPCODE_PACK(0x0c, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_SCALAR_SP_UOP)		= P4_OPCODE_PACK(0x0a, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_SCALAR_DP_UOP)		= P4_OPCODE_PACK(0x0e, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_64BIT_MMX_UOP)		= P4_OPCODE_PACK(0x02, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_128BIT_MMX_UOP)		= P4_OPCODE_PACK(0x1a, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_X87_FP_UOP)			= P4_OPCODE_PACK(0x04, 0x01),
+	/*
+	 * MSR_P4_FIRM_ESCR0:	8, 9
+	 * MSR_P4_FIRM_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_TC_MISC)			= P4_OPCODE_PACK(0x06, 0x01),
+	/*
+	 * MSR_P4_TC_ESCR0:	4, 5
+	 * MSR_P4_TC_ESCR1:	6, 7
+	 */
+
+	P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS)		= P4_OPCODE_PACK(0x13, 0x06),
+	/*
+	 * MSR_P4_FSB_ESCR0:	0, 1
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_TC_MS_XFER)			= P4_OPCODE_PACK(0x05, 0x00),
+	/*
+	 * MSR_P4_MS_ESCR0:	4, 5
+	 * MSR_P4_MS_ESCR1:	6, 7
+	 */
+
+	P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES)		= P4_OPCODE_PACK(0x09, 0x00),
+	/*
+	 * MSR_P4_MS_ESCR0:	4, 5
+	 * MSR_P4_MS_ESCR1:	6, 7
+	 */
+
+	P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE)	= P4_OPCODE_PACK(0x05, 0x02),
+	/*
+	 * MSR_P4_TBPU_ESCR0:	4, 5
+	 * MSR_P4_TBPU_ESCR1:	6, 7
+	 */
+
+	P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE)		= P4_OPCODE_PACK(0x04, 0x02),
+	/*
+	 * MSR_P4_TBPU_ESCR0:	4, 5
+	 * MSR_P4_TBPU_ESCR1:	6, 7
+	 */
+
+	P4_OPCODE(P4_EVENT_RESOURCE_STALL)		= P4_OPCODE_PACK(0x01, 0x01),
+	/*
+	 * MSR_P4_ALF_ESCR0:	12, 13, 16
+	 * MSR_P4_ALF_ESCR1:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_WC_BUFFER)			= P4_OPCODE_PACK(0x05, 0x05),
+	/*
+	 * MSR_P4_DAC_ESCR0:	8, 9
+	 * MSR_P4_DAC_ESCR1:	10, 11
+	 */
+
+	P4_OPCODE(P4_EVENT_B2B_CYCLES)			= P4_OPCODE_PACK(0x16, 0x03),
+	/*
+	 * MSR_P4_FSB_ESCR0:	0, 1
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_BNR)				= P4_OPCODE_PACK(0x08, 0x03),
+	/*
+	 * MSR_P4_FSB_ESCR0:	0, 1
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_SNOOP)			= P4_OPCODE_PACK(0x06, 0x03),
+	/*
+	 * MSR_P4_FSB_ESCR0:	0, 1
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_RESPONSE)			= P4_OPCODE_PACK(0x04, 0x03),
+	/*
+	 * MSR_P4_FSB_ESCR0:	0, 1
+	 * MSR_P4_FSB_ESCR1:	2, 3
+	 */
+
+	P4_OPCODE(P4_EVENT_FRONT_END_EVENT)		= P4_OPCODE_PACK(0x08, 0x05),
+	/*
+	 * MSR_P4_CRU_ESCR2:	12, 13, 16
+	 * MSR_P4_CRU_ESCR3:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_EXECUTION_EVENT)		= P4_OPCODE_PACK(0x0c, 0x05),
+	/*
+	 * MSR_P4_CRU_ESCR2:	12, 13, 16
+	 * MSR_P4_CRU_ESCR3:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_REPLAY_EVENT)		= P4_OPCODE_PACK(0x09, 0x05),
+	/*
+	 * MSR_P4_CRU_ESCR2:	12, 13, 16
+	 * MSR_P4_CRU_ESCR3:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_INSTR_RETIRED)		= P4_OPCODE_PACK(0x02, 0x04),
+	/*
+	 * MSR_P4_CRU_ESCR0:	12, 13, 16
+	 * MSR_P4_CRU_ESCR1:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_UOPS_RETIRED)		= P4_OPCODE_PACK(0x01, 0x04),
+	/*
+	 * MSR_P4_CRU_ESCR0:	12, 13, 16
+	 * MSR_P4_CRU_ESCR1:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_UOP_TYPE)			= P4_OPCODE_PACK(0x02, 0x02),
+	/*
+	 * MSR_P4_RAT_ESCR0:	12, 13, 16
+	 * MSR_P4_RAT_ESCR1:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_BRANCH_RETIRED)		= P4_OPCODE_PACK(0x06, 0x05),
+	/*
+	 * MSR_P4_CRU_ESCR2:	12, 13, 16
+	 * MSR_P4_CRU_ESCR3:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED)	= P4_OPCODE_PACK(0x03, 0x04),
+	/*
+	 * MSR_P4_CRU_ESCR0:	12, 13, 16
+	 * MSR_P4_CRU_ESCR1:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_X87_ASSIST)			= P4_OPCODE_PACK(0x03, 0x05),
+	/*
+	 * MSR_P4_CRU_ESCR2:	12, 13, 16
+	 * MSR_P4_CRU_ESCR3:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_MACHINE_CLEAR)		= P4_OPCODE_PACK(0x02, 0x05),
+	/*
+	 * MSR_P4_CRU_ESCR2:	12, 13, 16
+	 * MSR_P4_CRU_ESCR3:	14, 15, 17
+	 */
+
+	P4_OPCODE(P4_EVENT_INSTR_COMPLETED)		= P4_OPCODE_PACK(0x07, 0x04),
+	/*
+	 * MSR_P4_CRU_ESCR0:	12, 13, 16
+	 * MSR_P4_CRU_ESCR1:	14, 15, 17
+	 */
+};
+
+/*
+ * A caller should use the P4_ESCR_EMASK_BIT helper to
+ * pick the EventMask needed, for example
+ *
+ *	P4_ESCR_EMASK_BIT(P4_EVENT_TC_DELIVER_MODE, DD)
+ */
+enum P4_ESCR_EMASKS {
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DD, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DB, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, DI, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BD, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BB, 4),
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, BI, 5),
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_DELIVER_MODE, ID, 6),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_BPU_FETCH_REQUEST, TCMISS, 0),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, MISS, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_ITLB_REFERENCE, HIT_UK, 2),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, ST_RB_FULL, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_CANCEL, 64K_CONF, 3),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, LSC, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_MEMORY_COMPLETE, SSC, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_LOAD_PORT_REPLAY, SPLIT_LD, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_STORE_PORT_REPLAY, SPLIT_ST, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STA, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, NO_STD, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, PARTIAL_DATA, 4),
+	P4_GEN_ESCR_EMASK(P4_EVENT_MOB_LOAD_REPLAY, UNALGN_ADDR, 5),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, DTMISS, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_PAGE_WALK_TYPE, ITMISS, 1),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE, 4),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM, 5),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS, 8),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS, 9),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS, 10),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, DEFAULT, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_READ, 5),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, ALL_WRITE, 6),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_UC, 7),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WC, 8),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WT, 9),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WP, 10),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, MEM_WB, 11),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OWN, 13),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, OTHER, 14),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ALLOCATION, PREFETCH, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, DEFAULT, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_READ, 5),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, ALL_WRITE, 6),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_UC, 7),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WC, 8),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WT, 9),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WP, 10),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, MEM_WB, 11),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OWN, 13),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, OTHER, 14),
+	P4_GEN_ESCR_EMASK(P4_EVENT_IOQ_ACTIVE_ENTRIES, PREFETCH, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OTHER, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_DRV, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OWN, 4),
+	P4_GEN_ESCR_EMASK(P4_EVENT_FSB_DATA_ACTIVITY, DBSY_OTHER, 5),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE0, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_TYPE1, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN0, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LEN1, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_IO_TYPE, 5),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_LOCK_TYPE, 6),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_CACHE_TYPE, 7),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_SPLIT_TYPE, 8),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_DEM_TYPE, 9),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, REQ_ORD_TYPE, 10),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE0, 11),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE1, 12),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ALLOCATION, MEM_TYPE2, 13),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE0, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_TYPE1, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN0, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LEN1, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_IO_TYPE, 5),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_LOCK_TYPE, 6),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_CACHE_TYPE, 7),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_SPLIT_TYPE, 8),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_DEM_TYPE, 9),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, REQ_ORD_TYPE, 10),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE0, 11),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE1, 12),
+	P4_GEN_ESCR_EMASK(P4_EVENT_BSQ_ACTIVE_ENTRIES, MEM_TYPE2, 13),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_SSE_INPUT_ASSIST, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_SP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_PACKED_DP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_SP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_SCALAR_DP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_64BIT_MMX_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_128BIT_MMX_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_X87_FP_UOP, ALL, 15),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_MISC, FLUSH, 4),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING, 0),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_TC_MS_XFER, CISC, 0),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_BUILD, 0),
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_TC_DELIVER, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_UOP_QUEUE_WRITES, FROM_ROM, 2),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CONDITIONAL, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, CALL, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, RETURN, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE, INDIRECT, 4),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL, 1),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, CALL, 2),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN, 3),
+	P4_GEN_ESCR_EMASK(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT, 4),
+
+	P4_GEN_ESCR_EMASK(P4_EVENT_RESOURCE_STALL, SBFULL, 5),
||||
P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_EVICTS, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_WC_BUFFER, WCB_FULL_EVICTS, 1),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, NBOGUS, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_FRONT_END_EVENT, BOGUS, 1),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS0, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS1, 1),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS2, 2),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, NBOGUS3, 3),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS0, 4),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS1, 5),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS2, 6),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_EXECUTION_EVENT, BOGUS3, 7),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, NBOGUS, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_REPLAY_EVENT, BOGUS, 1),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, NBOGUSTAG, 1),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSNTAG, 2),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_RETIRED, BOGUSTAG, 3),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, NBOGUS, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_UOPS_RETIRED, BOGUS, 1),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGLOADS, 1),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_UOP_TYPE, TAGSTORES, 2),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNP, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMNM, 1),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTP, 2),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_BRANCH_RETIRED, MMTM, 3),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS, 0),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSU, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, FPSO, 1),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAO, 2),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, POAU, 3),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_X87_ASSIST, PREA, 4),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, CLEAR, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, MOCLEAR, 1),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_MACHINE_CLEAR, SMCLEAR, 2),
|
||||
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, NBOGUS, 0),
|
||||
P4_GEN_ESCR_EMASK(P4_EVENT_INSTR_COMPLETED, BOGUS, 1),
|
||||
};
|
||||
|
||||
/* P4 PEBS: stale for a while */
|
||||
#define P4_PEBS_METRIC_MASK 0x00001fffU
|
||||
#define P4_PEBS_UOB_TAG 0x01000000U
|
||||
#define P4_PEBS_ENABLE 0x02000000U
|
||||
|
||||
/* Replay metrics for MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT */
|
||||
#define P4_PEBS__1stl_cache_load_miss_retired 0x3000001
|
||||
#define P4_PEBS__2ndl_cache_load_miss_retired 0x3000002
|
||||
#define P4_PEBS__dtlb_load_miss_retired 0x3000004
|
||||
#define P4_PEBS__dtlb_store_miss_retired 0x3000004
|
||||
#define P4_PEBS__dtlb_all_miss_retired 0x3000004
|
||||
#define P4_PEBS__tagged_mispred_branch 0x3018000
|
||||
#define P4_PEBS__mob_load_replay_retired 0x3000200
|
||||
#define P4_PEBS__split_load_retired 0x3000400
|
||||
#define P4_PEBS__split_store_retired 0x3000400
|
||||
|
||||
#define P4_VERT__1stl_cache_load_miss_retired 0x0000001
|
||||
#define P4_VERT__2ndl_cache_load_miss_retired 0x0000001
|
||||
#define P4_VERT__dtlb_load_miss_retired 0x0000001
|
||||
#define P4_VERT__dtlb_store_miss_retired 0x0000002
|
||||
#define P4_VERT__dtlb_all_miss_retired 0x0000003
|
||||
#define P4_VERT__tagged_mispred_branch 0x0000010
|
||||
#define P4_VERT__mob_load_replay_retired 0x0000001
|
||||
#define P4_VERT__split_load_retired 0x0000001
|
||||
#define P4_VERT__split_store_retired 0x0000002
|
||||
|
||||
enum P4_CACHE_EVENTS {
|
||||
P4_CACHE__NONE,
|
||||
|
||||
P4_CACHE__1stl_cache_load_miss_retired,
|
||||
P4_CACHE__2ndl_cache_load_miss_retired,
|
||||
P4_CACHE__dtlb_load_miss_retired,
|
||||
P4_CACHE__dtlb_store_miss_retired,
|
||||
P4_CACHE__itlb_reference_hit,
|
||||
P4_CACHE__itlb_reference_miss,
|
||||
|
||||
P4_CACHE__MAX
|
||||
};
|
||||
|
||||
#endif /* PERF_EVENT_P4_H */
|
|
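The replay metrics above only work as pairs: the P4_PEBS__* value selects the metric in MSR_IA32_PEBS_ENABLE while the matching P4_VERT__* value goes into MSR_P4_PEBS_MATRIX_VERT, as the comment in the header notes. A minimal illustrative sketch of that pairing follows; the helper name is hypothetical, and a real driver would derive the values from the event configuration rather than hard-code them:

/* illustrative only: program one replay metric pair */
static void p4_pebs_select_dtlb_load_miss(void)
{
	wrmsrl(MSR_IA32_PEBS_ENABLE, P4_PEBS__dtlb_load_miss_retired);
	wrmsrl(MSR_P4_PEBS_MATRIX_VERT, P4_VERT__dtlb_load_miss_retired);
}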
@ -21,7 +21,6 @@ struct mm_struct;
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/ds.h>

#include <linux/personality.h>
#include <linux/cpumask.h>

@ -29,6 +28,7 @@ struct mm_struct;
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>

#define HBP_NUM 4
/*

@ -473,10 +473,6 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set. */
unsigned long debugctlmsr;
/* Debug Store context; see asm/ds.h */
struct ds_context *ds_ctx;
};

static inline unsigned long native_get_debugreg(int regno)

@ -803,7 +799,7 @@ extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
if (boot_cpu_data.x86 < 6)

@ -811,21 +807,6 @@ static inline unsigned long get_debugctlmsr(void)
#endif
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

return debugctlmsr;
}

static inline unsigned long get_debugctlmsr_on_cpu(int cpu)
{
u64 debugctlmsr = 0;
u32 val1, val2;

#ifndef CONFIG_X86_DEBUGCTLMSR
if (boot_cpu_data.x86 < 6)
return 0;
#endif
rdmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR, &val1, &val2);
debugctlmsr = val1 | ((u64)val2 << 32);

return debugctlmsr;
}

@ -838,18 +819,6 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr)
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

static inline void update_debugctlmsr_on_cpu(int cpu,
unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
if (boot_cpu_data.x86 < 6)
return;
#endif
wrmsr_on_cpu(cpu, MSR_IA32_DEBUGCTLMSR,
(u32)((u64)debugctlmsr),
(u32)((u64)debugctlmsr >> 32));
}

/*
* from system description table in BIOS. Mostly for MCA use, but
* others may find it useful:
@ -82,61 +82,6 @@

#ifndef __ASSEMBLY__
#include <linux/types.h>

/* configuration/status structure used in PTRACE_BTS_CONFIG and
PTRACE_BTS_STATUS commands.
*/
struct ptrace_bts_config {
/* requested or actual size of BTS buffer in bytes */
__u32 size;
/* bitmask of below flags */
__u32 flags;
/* buffer overflow signal */
__u32 signal;
/* actual size of bts_struct in bytes */
__u32 bts_size;
};
#endif /* __ASSEMBLY__ */

#define PTRACE_BTS_O_TRACE 0x1 /* branch trace */
#define PTRACE_BTS_O_SCHED 0x2 /* scheduling events w/ jiffies */
#define PTRACE_BTS_O_SIGNAL 0x4 /* send SIG<signal> on buffer overflow
instead of wrapping around */
#define PTRACE_BTS_O_ALLOC 0x8 /* (re)allocate buffer */

#define PTRACE_BTS_CONFIG 40
/* Configure branch trace recording.
ADDR points to a struct ptrace_bts_config.
DATA gives the size of that buffer.
A new buffer is allocated, if requested in the flags.
An overflow signal may only be requested for new buffers.
Returns the number of bytes read.
*/
#define PTRACE_BTS_STATUS 41
/* Return the current configuration in a struct ptrace_bts_config
pointed to by ADDR; DATA gives the size of that buffer.
Returns the number of bytes written.
*/
#define PTRACE_BTS_SIZE 42
/* Return the number of available BTS records for draining.
DATA and ADDR are ignored.
*/
#define PTRACE_BTS_GET 43
/* Get a single BTS record.
DATA defines the index into the BTS array, where 0 is the newest
entry, and higher indices refer to older entries.
ADDR is pointing to struct bts_struct (see asm/ds.h).
*/
#define PTRACE_BTS_CLEAR 44
/* Clear the BTS buffer.
DATA and ADDR are ignored.
*/
#define PTRACE_BTS_DRAIN 45
/* Read all available BTS records and clear the buffer.
ADDR points to an array of struct bts_struct.
DATA gives the size of that buffer.
BTS records are read from oldest to newest.
Returns number of BTS records drained.
*/
#endif

#endif /* _ASM_X86_PTRACE_ABI_H */
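For context on what is being deleted: a tracer would have driven this interface from userspace roughly as below. The sketch is hypothetical (not from this commit), assumes the struct and PTRACE_BTS_* constants defined in the removed header above, and keeps error handling minimal.

#include <sys/ptrace.h>
#include <sys/types.h>

/* hypothetical example: enable branch tracing on an attached tracee */
static int bts_config_example(pid_t pid)
{
	struct ptrace_bts_config cfg = {
		.size  = 0x4000,	/* request a 16 KiB buffer */
		.flags = PTRACE_BTS_O_ALLOC | PTRACE_BTS_O_TRACE,
	};

	if (ptrace(PTRACE_BTS_CONFIG, pid, &cfg, (void *)sizeof(cfg)) < 0)
		return -1;

	/* cfg now reports the actual buffer size and bts_struct size */
	return 0;
}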
@ -289,12 +289,6 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);

#ifdef CONFIG_X86_PTRACE_BTS
extern void ptrace_bts_untrace(struct task_struct *tsk);

#define arch_ptrace_untrace(tsk) ptrace_bts_untrace(tsk)
#endif /* CONFIG_X86_PTRACE_BTS */

#endif /* __KERNEL__ */

#endif /* !__ASSEMBLY__ */
@ -92,8 +92,7 @@ struct thread_info {
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
#define TIF_FREEZE 23 /* is freezing for suspend */
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */

@ -115,8 +114,7 @@ struct thread_info {
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
#define _TIF_FREEZE (1 << TIF_FREEZE)
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
#define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)

@ -147,7 +145,7 @@ struct thread_info {

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC)
(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
@ -47,8 +47,6 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
obj-y += process.o
obj-y += i387.o xsave.o
obj-y += ptrace.o
obj-$(CONFIG_X86_DS) += ds.o
obj-$(CONFIG_X86_DS_SELFTEST) += ds_selftest.o
obj-$(CONFIG_X86_32) += tls.o
obj-$(CONFIG_IA32_EMULATION) += tls.o
obj-y += step.o
@ -2545,6 +2545,9 @@ void irq_force_complete_move(int irq)
struct irq_desc *desc = irq_to_desc(irq);
struct irq_cfg *cfg = desc->chip_data;

if (!cfg)
return;

__irq_complete_move(&desc, cfg->vector);
}
#else
@ -929,7 +929,8 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
powernow_table[i].index = index;

/* Frequency may be rounded for these */
if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
|| boot_cpu_data.x86 == 0x11) {
powernow_table[i].frequency =
freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
} else
@ -12,7 +12,6 @@
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/ds.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

@ -388,7 +387,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_BTS);
if (!(l1 & (1<<12)))
set_cpu_cap(c, X86_FEATURE_PEBS);
ds_init_intel(c);
}

if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
File diff suppressed because it is too large
@ -2,7 +2,7 @@

static DEFINE_RAW_SPINLOCK(amd_nb_lock);

static __initconst u64 amd_hw_cache_event_ids
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@ -111,22 +111,19 @@ static u64 amd_pmu_event_map(int hw_event)
return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
static int amd_pmu_hw_config(struct perf_event *event)
{
#define K7_EVNTSEL_EVENT_MASK 0xF000000FFULL
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
#define K7_EVNTSEL_INV_MASK 0x000800000ULL
#define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
int ret = x86_pmu_hw_config(event);

#define K7_EVNTSEL_MASK \
(K7_EVNTSEL_EVENT_MASK | \
K7_EVNTSEL_UNIT_MASK | \
K7_EVNTSEL_EDGE_MASK | \
K7_EVNTSEL_INV_MASK | \
K7_EVNTSEL_REG_MASK)
if (ret)
return ret;

return hw_event & K7_EVNTSEL_MASK;
if (event->attr.type != PERF_TYPE_RAW)
return 0;

event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

return 0;
}

/*

@ -165,7 +162,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
* be removed on one CPU at a time AND PMU is disabled
* when we come here
*/
for (i = 0; i < x86_pmu.num_events; i++) {
for (i = 0; i < x86_pmu.num_counters; i++) {
if (nb->owners[i] == event) {
cmpxchg(nb->owners+i, event, NULL);
break;

@ -215,7 +212,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct amd_nb *nb = cpuc->amd_nb;
struct perf_event *old = NULL;
int max = x86_pmu.num_events;
int max = x86_pmu.num_counters;
int i, j, k = -1;

/*

@ -293,7 +290,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
/*
* initialize all possible NB constraints
*/
for (i = 0; i < x86_pmu.num_events; i++) {
for (i = 0; i < x86_pmu.num_counters; i++) {
__set_bit(i, nb->event_constraints[i].idxmsk);
nb->event_constraints[i].weight = 1;
}

@ -371,21 +368,22 @@ static void amd_pmu_cpu_dead(int cpu)
raw_spin_unlock(&amd_nb_lock);
}

static __initconst struct x86_pmu amd_pmu = {
static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = amd_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
.perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.raw_event = amd_pmu_raw_event,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
.num_events = 4,
.event_bits = 48,
.event_mask = (1ULL << 48) - 1,
.num_counters = 4,
.cntval_bits = 48,
.cntval_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
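With the new hook, a raw AMD event travels from userspace unchanged into amd_pmu_hw_config(), which only masks it with AMD64_RAW_EVENT_MASK instead of running it through the old K7_EVNTSEL_* filter. A hedged userspace sketch of such a raw request follows; the 0x76 CPU_CLK_UNHALTED event select is an assumption taken from AMD's documentation, not from this patch:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* hypothetical example: count unhalted cycles via a raw event */
static int open_amd_raw_cycles(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_RAW,
		.size   = sizeof(attr),
		.config = 0x76,	/* PMCx076 CPU_CLK_UNHALTED (assumed) */
	};

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}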
@ -88,7 +88,7 @@ static u64 intel_pmu_event_map(int hw_event)
return intel_perfmon_event_map[hw_event];
}

static __initconst u64 westmere_hw_cache_event_ids
static __initconst const u64 westmere_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@ -179,7 +179,7 @@ static __initconst u64 westmere_hw_cache_event_ids
},
};

static __initconst u64 nehalem_hw_cache_event_ids
static __initconst const u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@ -270,7 +270,7 @@ static __initconst u64 nehalem_hw_cache_event_ids
},
};

static __initconst u64 core2_hw_cache_event_ids
static __initconst const u64 core2_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@ -361,7 +361,7 @@ static __initconst u64 core2_hw_cache_event_ids
},
};

static __initconst u64 atom_hw_cache_event_ids
static __initconst const u64 atom_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =
@ -452,60 +452,6 @@ static __initconst u64 atom_hw_cache_event_ids
},
};

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL

#define CORE_EVNTSEL_MASK \
(INTEL_ARCH_EVTSEL_MASK | \
INTEL_ARCH_UNIT_MASK | \
INTEL_ARCH_EDGE_MASK | \
INTEL_ARCH_INV_MASK | \
INTEL_ARCH_CNT_MASK)

return hw_event & CORE_EVNTSEL_MASK;
}

static void intel_pmu_enable_bts(u64 config)
{
unsigned long debugctlmsr;

debugctlmsr = get_debugctlmsr();

debugctlmsr |= X86_DEBUGCTL_TR;
debugctlmsr |= X86_DEBUGCTL_BTS;
debugctlmsr |= X86_DEBUGCTL_BTINT;

if (!(config & ARCH_PERFMON_EVENTSEL_OS))
debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

if (!(config & ARCH_PERFMON_EVENTSEL_USR))
debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long debugctlmsr;

if (!cpuc->ds)
return;

debugctlmsr = get_debugctlmsr();

debugctlmsr &=
~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@ -514,12 +460,17 @@ static void intel_pmu_disable_all(void)

if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();

intel_pmu_pebs_disable_all();
intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(void)
static void intel_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

intel_pmu_pebs_enable_all();
intel_pmu_lbr_enable_all();
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@ -533,6 +484,42 @@ static void intel_pmu_enable_all(void)
}
}

/*
* Workaround for:
* Intel Errata AAK100 (model 26)
* Intel Errata AAP53 (model 30)
* Intel Errata BD53 (model 44)
*
* These chips need to be 'reset' when adding counters by programming
* the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5
* either in sequence on the same PMC or on different PMCs.
*/
static void intel_pmu_nhm_enable_all(int added)
{
if (added) {
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
int i;

wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);

wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

for (i = 0; i < 3; i++) {
struct perf_event *event = cpuc->events[i];

if (!event)
continue;

__x86_pmu_enable_event(&event->hw,
ARCH_PERFMON_EVENTSEL_ENABLE);
}
}
intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
u64 status;
@ -547,8 +534,7 @@ static inline void intel_pmu_ack_status(u64 ack)
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_event *hwc)
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
@ -557,71 +543,10 @@ intel_pmu_disable_fixed(struct hw_perf_event *hwc)

rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
(void)checking_wrmsrl(hwc->config_base, ctrl_val);
wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_drain_bts_buffer(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct bts_record {
u64 from;
u64 to;
u64 flags;
};
struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
struct perf_sample_data data;
struct pt_regs regs;

if (!event)
return;

if (!ds)
return;

at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
top = (struct bts_record *)(unsigned long)ds->bts_index;

if (top <= at)
return;

ds->bts_index = ds->bts_buffer_base;

perf_sample_data_init(&data, 0);

data.period = event->hw.last_period;
regs.ip = 0;

/*
* Prepare a generic sample, i.e. fill in the invariant fields.
* We will overwrite the from and to address before we output
* the sample.
*/
perf_prepare_sample(&header, &data, event, &regs);

if (perf_output_begin(&handle, event,
header.size * (top - at), 1, 1))
return;

for (; at < top; at++) {
data.ip = at->from;
data.addr = at->to;

perf_output_sample(&handle, &header, &data, event);
}

perf_output_end(&handle);

/* There's new data available. */
event->hw.interrupts++;
event->pending_kill = POLL_IN;
}

static inline void
intel_pmu_disable_event(struct perf_event *event)
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
@ -637,14 +562,15 @@ intel_pmu_disable_event(struct perf_event *event)
}

x86_pmu_disable_event(event);

if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_disable(event);
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_event *hwc)
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
int idx = hwc->idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
int err;

/*
* Enable IRQ generation (0x8),
@ -669,7 +595,7 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc)
rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
ctrl_val |= bits;
err = checking_wrmsrl(hwc->config_base, ctrl_val);
wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
@ -689,7 +615,10 @@ static void intel_pmu_enable_event(struct perf_event *event)
return;
}

__x86_pmu_enable_event(hwc);
if (unlikely(event->attr.precise_ip))
intel_pmu_pebs_enable(event);

__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
@ -708,20 +637,20 @@ static void intel_pmu_reset(void)
unsigned long flags;
int idx;

if (!x86_pmu.num_events)
if (!x86_pmu.num_counters)
return;

local_irq_save(flags);

printk("clearing PMU state on CPU#%d\n", smp_processor_id());

for (idx = 0; idx < x86_pmu.num_events; idx++) {
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
}
for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
}

if (ds)
ds->bts_index = ds->bts_buffer_base;
@ -747,7 +676,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
intel_pmu_enable_all();
intel_pmu_enable_all(0);
return 0;
}
@ -762,6 +691,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)

inc_irq_stat(apic_perf_irqs);
ack = status;

intel_pmu_lbr_read();

/*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status))
x86_pmu.drain_pebs(regs);

for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
@ -787,26 +725,22 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
goto again;

done:
intel_pmu_enable_all();
intel_pmu_enable_all(0);
return 1;
}

static struct event_constraint bts_constraint =
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

static struct event_constraint *
intel_special_constraints(struct perf_event *event)
intel_bts_constraints(struct perf_event *event)
{
unsigned int hw_event;
struct hw_perf_event *hwc = &event->hw;
unsigned int hw_event, bts_event;

hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;

if (unlikely((hw_event ==
x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
(event->hw.sample_period == 1))) {
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
return &bts_constraint;
}

return NULL;
}
@ -815,24 +749,53 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
{
struct event_constraint *c;

c = intel_special_constraints(event);
c = intel_bts_constraints(event);
if (c)
return c;

c = intel_pebs_constraints(event);
if (c)
return c;

return x86_get_event_constraints(cpuc, event);
}

static __initconst struct x86_pmu core_pmu = {
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);

if (ret)
return ret;

if (event->attr.type != PERF_TYPE_RAW)
return 0;

if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
return 0;

if (x86_pmu.version < 3)
return -EINVAL;

if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return -EACCES;

event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

return 0;
}
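The ANY-bit check above is the one raw-event path that needs privilege: v3 perfmon's AnyThread bit makes a counter observe both hyperthreads, so it can leak across contexts. A hedged sketch of the userspace request that would exercise it follows; bit 21 for ARCH_PERFMON_EVENTSEL_ANY and the 0x3c cycles encoding follow Intel's event-select layout and are assumptions here, not taken from this patch:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* hypothetical example: raw cycles event with the AnyThread bit set */
static int open_anythread_cycles(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_RAW,
		.size   = sizeof(attr),
		.config = 0x3c | (1ULL << 21),	/* UNHALTED_CORE_CYCLES + ANY */
	};

	/* expected to fail with EACCES for unprivileged callers */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}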
static __initconst const struct x86_pmu core_pmu = {
.name = "core",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.hw_config = x86_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
.raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
@ -845,17 +808,32 @@ static __initconst struct x86_pmu core_pmu = {
.event_constraints = intel_core_event_constraints,
};

static __initconst struct x86_pmu intel_pmu = {
static void intel_pmu_cpu_starting(int cpu)
{
init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
*/
intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,
.enable_all = intel_pmu_enable_all,
.enable = intel_pmu_enable_event,
.disable = intel_pmu_disable_event,
.hw_config = intel_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.event_map = intel_pmu_event_map,
.raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
/*
@ -864,14 +842,38 @@ static __initconst struct x86_pmu intel_pmu = {
* the generic event period:
*/
.max_period = (1ULL << 31) - 1,
.enable_bts = intel_pmu_enable_bts,
.disable_bts = intel_pmu_disable_bts,
.get_event_constraints = intel_get_event_constraints,

.cpu_starting = init_debug_store_on_cpu,
.cpu_dying = fini_debug_store_on_cpu,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
/*
* PEBS is unreliable due to:
*
* AJ67 - PEBS may experience CPL leaks
* AJ68 - PEBS PMI may be delayed by one event
* AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
* AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
*
* AJ67 could be worked around by restricting the OS/USR flags.
* AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
*
* AJ106 could possibly be worked around by not allowing LBR
* usage from PEBS, including the fixup.
* AJ68 could possibly be worked around by always programming
* a pebs_event_reset[0] value and coping with the lost events.
*
* But taken together it might just make sense to not enable PEBS on
* these chips.
*/
printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
x86_pmu.pebs = 0;
x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
union cpuid10_edx edx;
@ -881,12 +883,13 @@ static __init int intel_pmu_init(void)
int version;

if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
/* check for P6 processor family */
if (boot_cpu_data.x86 == 6) {
return p6_pmu_init();
} else {
switch (boot_cpu_data.x86) {
case 0x6:
return p6_pmu_init();
case 0xf:
return p4_pmu_init();
}
return -ENODEV;
}
}

/*
@ -904,16 +907,28 @@ static __init int intel_pmu_init(void)
x86_pmu = intel_pmu;

x86_pmu.version = version;
x86_pmu.num_events = eax.split.num_events;
x86_pmu.event_bits = eax.split.bit_width;
x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
x86_pmu.num_counters = eax.split.num_counters;
x86_pmu.cntval_bits = eax.split.bit_width;
x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
*/
if (version > 1)
x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

/*
* v2 and above have a perf capabilities MSR
*/
if (version > 1) {
u64 capabilities;

rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
x86_pmu.intel_cap.capabilities = capabilities;
}

intel_ds_init();

/*
* Install the hw-cache-events table:
@ -924,12 +939,15 @@ static __init int intel_pmu_init(void)
break;

case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
x86_pmu.quirks = intel_clovertown_quirks;
case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
case 29: /* six-core 45 nm xeon "Dunnington" */
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

intel_pmu_lbr_init_core();

x86_pmu.event_constraints = intel_core2_event_constraints;
pr_cont("Core2 events, ");
break;
@ -940,13 +958,19 @@ static __init int intel_pmu_init(void)
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

intel_pmu_lbr_init_nhm();

x86_pmu.event_constraints = intel_nehalem_event_constraints;
pr_cont("Nehalem/Corei7 events, ");
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
pr_cont("Nehalem events, ");
break;

case 28: /* Atom */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

intel_pmu_lbr_init_atom();

x86_pmu.event_constraints = intel_gen_event_constraints;
pr_cont("Atom events, ");
break;
@ -956,7 +980,10 @@ static __init int intel_pmu_init(void)
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

intel_pmu_lbr_init_nhm();

x86_pmu.event_constraints = intel_westmere_event_constraints;
x86_pmu.enable_all = intel_pmu_nhm_enable_all;
pr_cont("Westmere events, ");
break;
arch/x86/kernel/cpu/perf_event_intel_ds.c (new file, 641 lines)
@ -0,0 +1,641 @@
#ifdef CONFIG_CPU_SUP_INTEL

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS 4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE 24

#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE PAGE_SIZE

/*
* pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
u32 flags, ip;
u32 ax, bc, cx, dx;
u32 si, di, bp, sp;
};

*/

struct pebs_record_core {
u64 flags, ip;
u64 ax, bx, cx, dx;
u64 si, di, bp, sp;
u64 r8, r9, r10, r11;
u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
u64 flags, ip;
u64 ax, bx, cx, dx;
u64 si, di, bp, sp;
u64 r8, r9, r10, r11;
u64 r12, r13, r14, r15;
u64 status, dla, dse, lat;
};

/*
* A debug store configuration.
*
* We only support architectures that use 64bit fields.
*/
struct debug_store {
u64 bts_buffer_base;
u64 bts_index;
u64 bts_absolute_maximum;
u64 bts_interrupt_threshold;
u64 pebs_buffer_base;
u64 pebs_index;
u64 pebs_absolute_maximum;
u64 pebs_interrupt_threshold;
u64 pebs_event_reset[MAX_PEBS_EVENTS];
};

static void init_debug_store_on_cpu(int cpu)
{
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

if (!ds)
return;

wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
(u32)((u64)(unsigned long)ds),
(u32)((u64)(unsigned long)ds >> 32));
}

static void fini_debug_store_on_cpu(int cpu)
{
if (!per_cpu(cpu_hw_events, cpu).ds)
return;

wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_ds_buffers(void)
{
int cpu;

if (!x86_pmu.bts && !x86_pmu.pebs)
return;

get_online_cpus();

for_each_online_cpu(cpu)
fini_debug_store_on_cpu(cpu);

for_each_possible_cpu(cpu) {
struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

if (!ds)
continue;

per_cpu(cpu_hw_events, cpu).ds = NULL;

kfree((void *)(unsigned long)ds->pebs_buffer_base);
kfree((void *)(unsigned long)ds->bts_buffer_base);
kfree(ds);
}

put_online_cpus();
}

static int reserve_ds_buffers(void)
{
int cpu, err = 0;

if (!x86_pmu.bts && !x86_pmu.pebs)
return 0;

get_online_cpus();

for_each_possible_cpu(cpu) {
struct debug_store *ds;
void *buffer;
int max, thresh;

err = -ENOMEM;
ds = kzalloc(sizeof(*ds), GFP_KERNEL);
if (unlikely(!ds))
break;
per_cpu(cpu_hw_events, cpu).ds = ds;

if (x86_pmu.bts) {
buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
if (unlikely(!buffer))
break;

max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
thresh = max / 16;

ds->bts_buffer_base = (u64)(unsigned long)buffer;
ds->bts_index = ds->bts_buffer_base;
ds->bts_absolute_maximum = ds->bts_buffer_base +
max * BTS_RECORD_SIZE;
ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
thresh * BTS_RECORD_SIZE;
}

if (x86_pmu.pebs) {
buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
if (unlikely(!buffer))
break;

max = PEBS_BUFFER_SIZE / x86_pmu.pebs_record_size;

ds->pebs_buffer_base = (u64)(unsigned long)buffer;
ds->pebs_index = ds->pebs_buffer_base;
ds->pebs_absolute_maximum = ds->pebs_buffer_base +
max * x86_pmu.pebs_record_size;
/*
* Always use single record PEBS
*/
ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
x86_pmu.pebs_record_size;
}

err = 0;
}

if (err)
release_ds_buffers();
else {
for_each_online_cpu(cpu)
init_debug_store_on_cpu(cpu);
}

put_online_cpus();

return err;
}
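Worked numbers for the sizing above, assuming 4 KiB pages (an assumption; the code only fixes the ratios): BTS_BUFFER_SIZE is 4096 << 4 = 64 KiB, so max = 65536 / 24 = 2730 BTS records and thresh = 2730 / 16 = 170 records, leaving the interrupt threshold roughly 4 KiB short of the buffer end. The PEBS side instead pins its threshold one record past the base, which is what makes the WARN_ON_ONCE(n > 1) in the core drain routine below a valid invariant.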
/*
* BTS
*/

static struct event_constraint bts_constraint =
EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);

static void intel_pmu_enable_bts(u64 config)
{
unsigned long debugctlmsr;

debugctlmsr = get_debugctlmsr();

debugctlmsr |= DEBUGCTLMSR_TR;
debugctlmsr |= DEBUGCTLMSR_BTS;
debugctlmsr |= DEBUGCTLMSR_BTINT;

if (!(config & ARCH_PERFMON_EVENTSEL_OS))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

if (!(config & ARCH_PERFMON_EVENTSEL_USR))
debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long debugctlmsr;

if (!cpuc->ds)
return;

debugctlmsr = get_debugctlmsr();

debugctlmsr &=
~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_drain_bts_buffer(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct bts_record {
u64 from;
u64 to;
u64 flags;
};
struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
struct perf_sample_data data;
struct pt_regs regs;

if (!event)
return;

if (!ds)
return;

at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
top = (struct bts_record *)(unsigned long)ds->bts_index;

if (top <= at)
return;

ds->bts_index = ds->bts_buffer_base;

perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
regs.ip = 0;

/*
* Prepare a generic sample, i.e. fill in the invariant fields.
* We will overwrite the from and to address before we output
* the sample.
*/
perf_prepare_sample(&header, &data, event, &regs);

if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
return;

for (; at < top; at++) {
data.ip = at->from;
data.addr = at->to;

perf_output_sample(&handle, &header, &data, event);
}

perf_output_end(&handle);

/* There's new data available. */
event->hw.interrupts++;
event->pending_kill = POLL_IN;
}
/*
* PEBS
*/

static struct event_constraint intel_core_pebs_events[] = {
PEBS_EVENT_CONSTRAINT(0x00c0, 0x1), /* INSTR_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
PEBS_EVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
PEBS_EVENT_CONSTRAINT(0x01cb, 0x1), /* MEM_LOAD_RETIRED.L1D_MISS */
PEBS_EVENT_CONSTRAINT(0x02cb, 0x1), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x04cb, 0x1), /* MEM_LOAD_RETIRED.L2_MISS */
PEBS_EVENT_CONSTRAINT(0x08cb, 0x1), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x10cb, 0x1), /* MEM_LOAD_RETIRED.DTLB_MISS */
EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_pebs_events[] = {
PEBS_EVENT_CONSTRAINT(0x00c0, 0xf), /* INSTR_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0xfec1, 0xf), /* X87_OPS_RETIRED.ANY */
PEBS_EVENT_CONSTRAINT(0x00c5, 0xf), /* BR_INST_RETIRED.MISPRED */
PEBS_EVENT_CONSTRAINT(0x1fc7, 0xf), /* SIMD_INST_RETURED.ANY */
PEBS_EVENT_CONSTRAINT(0x01cb, 0xf), /* MEM_LOAD_RETIRED.L1D_MISS */
PEBS_EVENT_CONSTRAINT(0x02cb, 0xf), /* MEM_LOAD_RETIRED.L1D_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x04cb, 0xf), /* MEM_LOAD_RETIRED.L2_MISS */
PEBS_EVENT_CONSTRAINT(0x08cb, 0xf), /* MEM_LOAD_RETIRED.L2_LINE_MISS */
PEBS_EVENT_CONSTRAINT(0x10cb, 0xf), /* MEM_LOAD_RETIRED.DTLB_MISS */
EVENT_CONSTRAINT_END
};

static struct event_constraint *
intel_pebs_constraints(struct perf_event *event)
{
struct event_constraint *c;

if (!event->attr.precise_ip)
return NULL;

if (x86_pmu.pebs_constraints) {
for_each_event_constraint(c, x86_pmu.pebs_constraints) {
if ((event->hw.config & c->cmask) == c->code)
return c;
}
}

return &emptyconstraint;
}

static void intel_pmu_pebs_enable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

cpuc->pebs_enabled |= 1ULL << hwc->idx;
WARN_ON_ONCE(cpuc->enabled);

if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
}

static void intel_pmu_pebs_disable(struct perf_event *event)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;

cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
if (cpuc->enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

hwc->config |= ARCH_PERFMON_EVENTSEL_INT;

if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_disable(event);
}

static void intel_pmu_pebs_enable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

static void intel_pmu_pebs_disable_all(void)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}
#include <asm/insn.h>

static inline bool kernel_ip(unsigned long ip)
{
#ifdef CONFIG_X86_32
return ip > PAGE_OFFSET;
#else
return (long)ip < 0;
#endif
}

static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;

/*
* We don't need to fixup if the PEBS assist is fault like
*/
if (!x86_pmu.intel_cap.pebs_trap)
return 1;

/*
* No LBR entry, no basic block, no rewinding
*/
if (!cpuc->lbr_stack.nr || !from || !to)
return 0;

/*
* Basic blocks should never cross user/kernel boundaries
*/
if (kernel_ip(ip) != kernel_ip(to))
return 0;

/*
* unsigned math, either ip is before the start (impossible) or
* the basic block is larger than 1 page (sanity)
*/
if ((ip - to) > PAGE_SIZE)
return 0;

/*
* We sampled a branch insn, rewind using the LBR stack
*/
if (ip == to) {
regs->ip = from;
return 1;
}

do {
struct insn insn;
u8 buf[MAX_INSN_SIZE];
void *kaddr;

old_to = to;
if (!kernel_ip(ip)) {
int bytes, size = MAX_INSN_SIZE;

bytes = copy_from_user_nmi(buf, (void __user *)to, size);
if (bytes != size)
return 0;

kaddr = buf;
} else
kaddr = (void *)to;

kernel_insn_init(&insn, kaddr);
insn_get_length(&insn);
to += insn.length;
} while (to < ip);

if (to == ip) {
regs->ip = old_to;
return 1;
}

/*
* Even though we decoded the basic block, the instruction stream
* never matched the given IP, either the TO or the IP got corrupted.
*/
return 0;
}
static int intel_pmu_save_and_restart(struct perf_event *event);

static void __intel_pmu_pebs_event(struct perf_event *event,
struct pt_regs *iregs, void *__pebs)
{
/*
* We cast to pebs_record_core since that is a subset of
* both formats and we don't use the other fields in this
* routine.
*/
struct pebs_record_core *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;

if (!intel_pmu_save_and_restart(event))
return;

perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;

/*
* We use the interrupt regs as a base because the PEBS record
* does not contain a full regs set, specifically it seems to
* lack segment descriptors, which get used by things like
* user_mode().
*
* In the simple case fix up only the IP and BP,SP regs, for
* PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
*/
regs = *iregs;
regs.ip = pebs->ip;
regs.bp = pebs->bp;
regs.sp = pebs->sp;

if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
regs.flags |= PERF_EFLAGS_EXACT;
else
regs.flags &= ~PERF_EFLAGS_EXACT;

if (perf_event_overflow(event, 1, &data, &regs))
x86_pmu_stop(event);
}

static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = cpuc->events[0]; /* PMC0 only */
struct pebs_record_core *at, *top;
int n;

if (!ds || !x86_pmu.pebs)
return;

at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

/*
* Whatever else happens, drain the thing
*/
ds->pebs_index = ds->pebs_buffer_base;

if (!test_bit(0, cpuc->active_mask))
return;

WARN_ON_ONCE(!event);

if (!event->attr.precise_ip)
return;

n = top - at;
if (n <= 0)
return;

/*
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
WARN_ON_ONCE(n > 1);
at += n - 1;

__intel_pmu_pebs_event(event, iregs, at);
}

static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct pebs_record_nhm *at, *top;
struct perf_event *event = NULL;
u64 status = 0;
int bit, n;

if (!ds || !x86_pmu.pebs)
return;

at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

ds->pebs_index = ds->pebs_buffer_base;

n = top - at;
if (n <= 0)
return;

/*
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
WARN_ON_ONCE(n > MAX_PEBS_EVENTS);

for ( ; at < top; at++) {
for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
event = cpuc->events[bit];
if (!test_bit(bit, cpuc->active_mask))
continue;

WARN_ON_ONCE(!event);

if (!event->attr.precise_ip)
continue;

if (__test_and_set_bit(bit, (unsigned long *)&status))
continue;

break;
}

if (!event || bit >= MAX_PEBS_EVENTS)
continue;

__intel_pmu_pebs_event(event, iregs, at);
}
}

/*
* BTS, PEBS probe and setup
*/

static void intel_ds_init(void)
{
/*
* No support for 32bit formats
*/
if (!boot_cpu_has(X86_FEATURE_DTES64))
return;

x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
if (x86_pmu.pebs) {
char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
int format = x86_pmu.intel_cap.pebs_format;

switch (format) {
case 0:
printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
x86_pmu.pebs_constraints = intel_core_pebs_events;
break;

case 1:
printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
break;

default:
printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
x86_pmu.pebs = 0;
break;
}
}
}
#else /* CONFIG_CPU_SUP_INTEL */
|
||||
|
||||
static int reserve_ds_buffers(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void release_ds_buffers(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CPU_SUP_INTEL */
|
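/*
 * Editor's note: a minimal user-space sketch of the drain pattern used by
 * intel_pmu_drain_pebs_core() above: snapshot the [base, index) window,
 * reset the hardware index first so no new record is lost, then walk the
 * snapshot. The record type and the process_record() callback are
 * hypothetical stand-ins, not part of the kernel code above.
 */
struct record { unsigned long long ip; };

static void drain(struct record *base, struct record **index,
		  void (*process_record)(struct record *))
{
	struct record *at = base;
	struct record *top = *index;

	*index = base;			/* whatever else happens, drain it */

	for (; at < top; at++)		/* then consume the snapshot */
		process_record(at);
}
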
218	arch/x86/kernel/cpu/perf_event_intel_lbr.c	Normal file

@@ -0,0 +1,218 @@
#ifdef CONFIG_CPU_SUP_INTEL

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
};

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes nearly impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to + i, 0);
	}
}

static void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

static void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	WARN_ON_ONCE(cpuc->enabled);

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */

	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}

	cpuc->lbr_users++;
}

static void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);

	if (cpuc->enabled && !cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable();
}

static void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64 lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from  = msr_lastbranch.from;
		cpuc->lbr_entries[i].to    = msr_lastbranch.to;
		cpuc->lbr_entries[i].flags = 0;
	}
	cpuc->lbr_stack.nr = i;
}

#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, flags = 0;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
			flags = !!(from & LBR_FROM_FLAG_MISPRED);
			from = (u64)((((s64)from) << 1) >> 1);
		}

		cpuc->lbr_entries[i].from  = from;
		cpuc->lbr_entries[i].to    = to;
		cpuc->lbr_entries[i].flags = flags;
	}
	cpuc->lbr_stack.nr = i;
}

static void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);
}

static void intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr	 = 4;
	x86_pmu.lbr_tos  = 0x01c9;
	x86_pmu.lbr_from = 0x40;
	x86_pmu.lbr_to   = 0x60;
}

static void intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos  = 0x01c9;
	x86_pmu.lbr_from = 0x680;
	x86_pmu.lbr_to   = 0x6c0;
}

static void intel_pmu_lbr_init_atom(void)
{
	x86_pmu.lbr_nr	 = 8;
	x86_pmu.lbr_tos  = 0x01c9;
	x86_pmu.lbr_from = 0x40;
	x86_pmu.lbr_to   = 0x60;
}

#endif /* CONFIG_CPU_SUP_INTEL */

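/*
 * Editor's note: a small stand-alone sketch of the ring arithmetic used by
 * intel_pmu_lbr_read_{32,64}() above. With lbr_nr a power of two,
 * (tos - i) & (lbr_nr - 1) walks backwards from the top-of-stack slot,
 * wrapping around the register file, so entries come out most recent first.
 */
static void lbr_walk_order(unsigned long tos, unsigned long lbr_nr,
			   unsigned long *order)
{
	unsigned long mask = lbr_nr - 1;	/* lbr_nr must be a power of two */
	unsigned long i;

	for (i = 0; i < lbr_nr; i++)
		order[i] = (tos - i) & mask;	/* e.g. tos=1, nr=4 -> 1, 0, 3, 2 */
}
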
857	arch/x86/kernel/cpu/perf_event_p4.c	Normal file

@@ -0,0 +1,857 @@
/*
 * Netburst Performance Events (P4, old Xeon)
 *
 * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#ifdef CONFIG_CPU_SUP_INTEL

#include <asm/perf_event_p4.h>

#define P4_CNTR_LIMIT 3
/*
 * array indices: 0,1 - HT threads, used with an HT-enabled cpu
 */
struct p4_event_bind {
	unsigned int opcode;		/* Event code and ESCR selector */
	unsigned int escr_msr[2];	/* ESCR MSR for this event */
	char cntr[2][P4_CNTR_LIMIT];	/* counter index (offset), -1 on absence */
};

struct p4_cache_event_bind {
	unsigned int metric_pebs;
	unsigned int metric_vert;
};

#define P4_GEN_CACHE_EVENT_BIND(name)		\
	[P4_CACHE__##name] = {			\
		.metric_pebs = P4_PEBS__##name,	\
		.metric_vert = P4_VERT__##name,	\
	}

static struct p4_cache_event_bind p4_cache_event_bind_map[] = {
	P4_GEN_CACHE_EVENT_BIND(1stl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(2ndl_cache_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_load_miss_retired),
	P4_GEN_CACHE_EVENT_BIND(dtlb_store_miss_retired),
};

/*
 * Note that we don't use CCCR1 here, there is an
 * exception for P4_BSQ_ALLOCATION but we just have
 * no workaround
 *
 * consider this binding as resources which a particular
 * event may borrow, it doesn't contain EventMask,
 * Tags and friends -- they are left to a caller
 */
static struct p4_event_bind p4_event_bind_map[] = {
	[P4_EVENT_TC_DELIVER_MODE] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_DELIVER_MODE),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_BPU_FETCH_REQUEST] = {
		.opcode		= P4_OPCODE(P4_EVENT_BPU_FETCH_REQUEST),
		.escr_msr	= { MSR_P4_BPU_ESCR0, MSR_P4_BPU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_ITLB_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_ITLB_REFERENCE),
		.escr_msr	= { MSR_P4_ITLB_ESCR0, MSR_P4_ITLB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_MEMORY_CANCEL] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_CANCEL),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MEMORY_COMPLETE] = {
		.opcode		= P4_OPCODE(P4_EVENT_MEMORY_COMPLETE),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_LOAD_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_LOAD_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_STORE_PORT_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_STORE_PORT_REPLAY),
		.escr_msr	= { MSR_P4_SAAT_ESCR0, MSR_P4_SAAT_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_MOB_LOAD_REPLAY] = {
		.opcode		= P4_OPCODE(P4_EVENT_MOB_LOAD_REPLAY),
		.escr_msr	= { MSR_P4_MOB_ESCR0, MSR_P4_MOB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_PAGE_WALK_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_PAGE_WALK_TYPE),
		.escr_msr	= { MSR_P4_PMH_ESCR0, MSR_P4_PMH_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_CACHE_REFERENCE] = {
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_CACHE_REFERENCE),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ALLOCATION] = {
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ALLOCATION),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_IOQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_IOQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_FSB_ESCR1, MSR_P4_FSB_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_FSB_DATA_ACTIVITY] = {
		.opcode		= P4_OPCODE(P4_EVENT_FSB_DATA_ACTIVITY),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BSQ_ALLOCATION] = {		/* shared ESCR, broken CCCR1 */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ALLOCATION),
		.escr_msr	= { MSR_P4_BSU_ESCR0, MSR_P4_BSU_ESCR0 },
		.cntr		= { {0, -1, -1}, {1, -1, -1} },
	},
	[P4_EVENT_BSQ_ACTIVE_ENTRIES] = {	/* shared ESCR */
		.opcode		= P4_OPCODE(P4_EVENT_BSQ_ACTIVE_ENTRIES),
		.escr_msr	= { MSR_P4_BSU_ESCR1, MSR_P4_BSU_ESCR1 },
		.cntr		= { {2, -1, -1}, {3, -1, -1} },
	},
	[P4_EVENT_SSE_INPUT_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_SSE_INPUT_ASSIST),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_PACKED_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_PACKED_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_SP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_SP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_SCALAR_DP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SCALAR_DP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_64BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_64BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_128BIT_MMX_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_128BIT_MMX_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_X87_FP_UOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_FP_UOP),
		.escr_msr	= { MSR_P4_FIRM_ESCR0, MSR_P4_FIRM_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_TC_MISC] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MISC),
		.escr_msr	= { MSR_P4_TC_ESCR0, MSR_P4_TC_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_GLOBAL_POWER_EVENTS] = {
		.opcode		= P4_OPCODE(P4_EVENT_GLOBAL_POWER_EVENTS),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_TC_MS_XFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_TC_MS_XFER),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_UOP_QUEUE_WRITES] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_QUEUE_WRITES),
		.escr_msr	= { MSR_P4_MS_ESCR0, MSR_P4_MS_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR0 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RETIRED_BRANCH_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RETIRED_BRANCH_TYPE),
		.escr_msr	= { MSR_P4_TBPU_ESCR0, MSR_P4_TBPU_ESCR1 },
		.cntr		= { {4, 5, -1}, {6, 7, -1} },
	},
	[P4_EVENT_RESOURCE_STALL] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESOURCE_STALL),
		.escr_msr	= { MSR_P4_ALF_ESCR0, MSR_P4_ALF_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_WC_BUFFER] = {
		.opcode		= P4_OPCODE(P4_EVENT_WC_BUFFER),
		.escr_msr	= { MSR_P4_DAC_ESCR0, MSR_P4_DAC_ESCR1 },
		.cntr		= { {8, 9, -1}, {10, 11, -1} },
	},
	[P4_EVENT_B2B_CYCLES] = {
		.opcode		= P4_OPCODE(P4_EVENT_B2B_CYCLES),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_BNR] = {
		.opcode		= P4_OPCODE(P4_EVENT_BNR),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_SNOOP] = {
		.opcode		= P4_OPCODE(P4_EVENT_SNOOP),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_RESPONSE] = {
		.opcode		= P4_OPCODE(P4_EVENT_RESPONSE),
		.escr_msr	= { MSR_P4_FSB_ESCR0, MSR_P4_FSB_ESCR1 },
		.cntr		= { {0, -1, -1}, {2, -1, -1} },
	},
	[P4_EVENT_FRONT_END_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_FRONT_END_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_EXECUTION_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_EXECUTION_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_REPLAY_EVENT] = {
		.opcode		= P4_OPCODE(P4_EVENT_REPLAY_EVENT),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOPS_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOPS_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_UOP_TYPE] = {
		.opcode		= P4_OPCODE(P4_EVENT_UOP_TYPE),
		.escr_msr	= { MSR_P4_RAT_ESCR0, MSR_P4_RAT_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MISPRED_BRANCH_RETIRED] = {
		.opcode		= P4_OPCODE(P4_EVENT_MISPRED_BRANCH_RETIRED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_X87_ASSIST] = {
		.opcode		= P4_OPCODE(P4_EVENT_X87_ASSIST),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_MACHINE_CLEAR] = {
		.opcode		= P4_OPCODE(P4_EVENT_MACHINE_CLEAR),
		.escr_msr	= { MSR_P4_CRU_ESCR2, MSR_P4_CRU_ESCR3 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
	[P4_EVENT_INSTR_COMPLETED] = {
		.opcode		= P4_OPCODE(P4_EVENT_INSTR_COMPLETED),
		.escr_msr	= { MSR_P4_CRU_ESCR0, MSR_P4_CRU_ESCR1 },
		.cntr		= { {12, 13, 16}, {14, 15, 17} },
	},
};

#define P4_GEN_CACHE_EVENT(event, bit, cache_event)			\
	p4_config_pack_escr(P4_ESCR_EVENT(event)		|	\
			    P4_ESCR_EMASK_BIT(event, bit))	|	\
	p4_config_pack_cccr(cache_event				|	\
			    P4_CCCR_ESEL(P4_OPCODE_ESEL(P4_OPCODE(event))))

static __initconst const u64 p4_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__1stl_cache_load_miss_retired),
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__2ndl_cache_load_miss_retired),
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_load_miss_retired),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_REPLAY_EVENT, NBOGUS,
						P4_CACHE__dtlb_store_miss_retired),
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, HIT,
						P4_CACHE__itlb_reference_hit),
		[ C(RESULT_MISS)   ] = P4_GEN_CACHE_EVENT(P4_EVENT_ITLB_REFERENCE, MISS,
						P4_CACHE__itlb_reference_miss),
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static u64 p4_general_events[PERF_COUNT_HW_MAX] = {
	/* non-halted CPU clocks */
	[PERF_COUNT_HW_CPU_CYCLES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_GLOBAL_POWER_EVENTS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_GLOBAL_POWER_EVENTS, RUNNING)),

	/*
	 * retired instructions;
	 * for the sake of simplicity we don't use the FSB tagging
	 */
	[PERF_COUNT_HW_INSTRUCTIONS] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_INSTR_RETIRED)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, NBOGUSNTAG)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_INSTR_RETIRED, BOGUSNTAG)),

	/* cache hits */
	[PERF_COUNT_HW_CACHE_REFERENCES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_HITM)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITE)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_HITM)),

	/* cache misses */
	[PERF_COUNT_HW_CACHE_MISSES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_BSQ_CACHE_REFERENCE)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_2ndL_MISS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, RD_3rdL_MISS)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_BSQ_CACHE_REFERENCE, WR_2ndL_MISS)),

	/* branch instructions retired */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_RETIRED_BRANCH_TYPE)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CONDITIONAL)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, CALL)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, RETURN)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_RETIRED_BRANCH_TYPE, INDIRECT)),

	/* mispredicted branches retired */
	[PERF_COUNT_HW_BRANCH_MISSES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_MISPRED_BRANCH_RETIRED)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_MISPRED_BRANCH_RETIRED, NBOGUS)),

	/* bus ready clocks (cpu is driving #DRDY_DRV\#DRDY_OWN): */
	[PERF_COUNT_HW_BUS_CYCLES] =
		p4_config_pack_escr(P4_ESCR_EVENT(P4_EVENT_FSB_DATA_ACTIVITY)		|
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_DRV)	|
			P4_ESCR_EMASK_BIT(P4_EVENT_FSB_DATA_ACTIVITY, DRDY_OWN))	|
		p4_config_pack_cccr(P4_CCCR_EDGE | P4_CCCR_COMPARE),
};

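/*
 * Editor's note: a sketch of the pack/unpack idea behind the
 * p4_config_pack_escr()/p4_config_pack_cccr() helpers used above: one 64-bit
 * perf config carries both the ESCR and the CCCR halves, so a single u64
 * fully describes a P4 event. The 32/32 split below is an illustrative
 * assumption for the demo, not a statement of the kernel's exact bit layout.
 */
typedef unsigned long long demo_u64;

static demo_u64 demo_pack_escr(demo_u64 escr)  { return escr << 32; }
static demo_u64 demo_pack_cccr(demo_u64 cccr)  { return cccr & 0xffffffffULL; }
static demo_u64 demo_unpack_escr(demo_u64 cfg) { return cfg >> 32; }
static demo_u64 demo_unpack_cccr(demo_u64 cfg) { return cfg & 0xffffffffULL; }
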
static struct p4_event_bind *p4_config_get_bind(u64 config)
{
	unsigned int evnt = p4_config_unpack_event(config);
	struct p4_event_bind *bind = NULL;

	if (evnt < ARRAY_SIZE(p4_event_bind_map))
		bind = &p4_event_bind_map[evnt];

	return bind;
}

static u64 p4_pmu_event_map(int hw_event)
{
	struct p4_event_bind *bind;
	unsigned int esel;
	u64 config;

	config = p4_general_events[hw_event];
	bind = p4_config_get_bind(config);
	esel = P4_OPCODE_ESEL(bind->opcode);
	config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));

	return config;
}

static int p4_hw_config(struct perf_event *event)
{
	int cpu = get_cpu();
	int rc = 0;
	unsigned int evnt;
	u32 escr, cccr;

	/*
	 * the reason we grab the cpu this early is that if we get scheduled
	 * for the first time on the same cpu, we will not need to swap the
	 * thread-specific flags in the config (and will save some cpu cycles)
	 */

	cccr = p4_default_cccr_conf(cpu);
	escr = p4_default_escr_conf(cpu, event->attr.exclude_kernel,
					 event->attr.exclude_user);
	event->hw.config = p4_config_pack_escr(escr) |
			   p4_config_pack_cccr(cccr);

	if (p4_ht_active() && p4_ht_thread(cpu))
		event->hw.config = p4_set_ht_bit(event->hw.config);

	if (event->attr.type == PERF_TYPE_RAW) {

		/* user data may have an out-of-bound event index */
		evnt = p4_config_unpack_event(event->attr.config);
		if (evnt >= ARRAY_SIZE(p4_event_bind_map)) {
			rc = -EINVAL;
			goto out;
		}

		/*
		 * We don't control raw events, so it's up to the caller
		 * to pass sane values (and we don't count the thread number
		 * on an HT machine but allow HT-compatible specifics to be
		 * passed on)
		 *
		 * XXX: HT wide things should check perf_paranoid_cpu() &&
		 *      CAP_SYS_ADMIN
		 */
		event->hw.config |= event->attr.config &
			(p4_config_pack_escr(P4_ESCR_MASK_HT) |
			 p4_config_pack_cccr(P4_CCCR_MASK_HT));
	}

	rc = x86_setup_perfctr(event);
out:
	put_cpu();
	return rc;
}

static inline void p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
{
	unsigned long dummy;

	rdmsrl(hwc->config_base + hwc->idx, dummy);
	if (dummy & P4_CCCR_OVF) {
		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			((u64)dummy) & ~P4_CCCR_OVF);
	}
}

static inline void p4_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * If the event gets disabled while the counter is in an overflowed
	 * state we need to clear P4_CCCR_OVF, otherwise the interrupt gets
	 * asserted again and again
	 */
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
		(u64)(p4_config_unpack_cccr(hwc->config)) &
			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}

static void p4_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_disable_event(event);
	}
}

static void p4_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int thread = p4_ht_config_thread(hwc->config);
	u64 escr_conf = p4_config_unpack_escr(p4_clear_ht_bit(hwc->config));
	unsigned int idx = p4_config_unpack_event(hwc->config);
	unsigned int idx_cache = p4_config_unpack_cache_event(hwc->config);
	struct p4_event_bind *bind;
	struct p4_cache_event_bind *bind_cache;
	u64 escr_addr, cccr;

	bind = &p4_event_bind_map[idx];
	escr_addr = (u64)bind->escr_msr[thread];

	/*
	 * - we don't support cascaded counters yet
	 * - and counter 1 is broken (erratum)
	 */
	WARN_ON_ONCE(p4_is_event_cascaded(hwc->config));
	WARN_ON_ONCE(hwc->idx == 1);

	/* we need a real Event value */
	escr_conf &= ~P4_ESCR_EVENT_MASK;
	escr_conf |= P4_ESCR_EVENT(P4_OPCODE_EVNT(bind->opcode));

	cccr = p4_config_unpack_cccr(hwc->config);

	/*
	 * it could be a cache event, so we need to
	 * set the metrics in the additional MSRs
	 */
	BUILD_BUG_ON(P4_CACHE__MAX > P4_CCCR_CACHE_OPS_MASK);
	if (idx_cache > P4_CACHE__NONE &&
	    idx_cache < ARRAY_SIZE(p4_cache_event_bind_map)) {
		bind_cache = &p4_cache_event_bind_map[idx_cache];
		(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind_cache->metric_pebs);
		(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind_cache->metric_vert);
	}

	(void)checking_wrmsrl(escr_addr, escr_conf);
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
				(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}

static void p4_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		if (!test_bit(idx, cpuc->active_mask))
			continue;
		p4_pmu_enable_event(event);
	}
}

static int p4_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	data.addr = 0;
	data.raw = NULL;

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		WARN_ON_ONCE(hwc->idx != idx);

		/*
		 * FIXME: Redundant call, actually not needed
		 * but just to check if we're screwed
		 */
		p4_pmu_clear_cccr_ovf(hwc);

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;
		if (perf_event_overflow(event, 1, &data, regs))
			p4_pmu_disable_event(event);
	}

	if (handled) {
		/* p4 quirk: unmask it again */
		apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
		inc_irq_stat(apic_perf_irqs);
	}

	return handled;
}

/*
 * swap the thread-specific fields according to the thread
 * we are going to run on
 */
static void p4_pmu_swap_config_ts(struct hw_perf_event *hwc, int cpu)
{
	u32 escr, cccr;

	/*
	 * either we are lucky and continue on the same cpu, or there is
	 * no HT support
	 */
	if (!p4_should_swap_ts(hwc->config, cpu))
		return;

	/*
	 * the event was migrated from another logical
	 * cpu, so we need to swap the thread-specific flags
	 */

	escr = p4_config_unpack_escr(hwc->config);
	cccr = p4_config_unpack_cccr(hwc->config);

	if (p4_ht_thread(cpu)) {
		cccr &= ~P4_CCCR_OVF_PMI_T0;
		cccr |= P4_CCCR_OVF_PMI_T1;
		if (escr & P4_ESCR_T0_OS) {
			escr &= ~P4_ESCR_T0_OS;
			escr |= P4_ESCR_T1_OS;
		}
		if (escr & P4_ESCR_T0_USR) {
			escr &= ~P4_ESCR_T0_USR;
			escr |= P4_ESCR_T1_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config |= P4_CONFIG_HT;
	} else {
		cccr &= ~P4_CCCR_OVF_PMI_T1;
		cccr |= P4_CCCR_OVF_PMI_T0;
		if (escr & P4_ESCR_T1_OS) {
			escr &= ~P4_ESCR_T1_OS;
			escr |= P4_ESCR_T0_OS;
		}
		if (escr & P4_ESCR_T1_USR) {
			escr &= ~P4_ESCR_T1_USR;
			escr |= P4_ESCR_T0_USR;
		}
		hwc->config  = p4_config_pack_escr(escr);
		hwc->config |= p4_config_pack_cccr(cccr);
		hwc->config &= ~P4_CONFIG_HT;
	}
}

/*
 * ESCR address hashing is tricky: the ESCRs are not sequential
 * in memory but all start from MSR_P4_BSU_ESCR0 (0x03a0), and
 * the low byte of any ESCR address lies in the range [0xa0, 0xe1],
 *
 * so we end up with a hash table that is ~70% filled
 */

#define P4_ESCR_MSR_BASE		0x000003a0
#define P4_ESCR_MSR_MAX			0x000003e1
#define P4_ESCR_MSR_TABLE_SIZE		(P4_ESCR_MSR_MAX - P4_ESCR_MSR_BASE + 1)
#define P4_ESCR_MSR_IDX(msr)		(msr - P4_ESCR_MSR_BASE)
#define P4_ESCR_MSR_TABLE_ENTRY(msr)	[P4_ESCR_MSR_IDX(msr)] = msr

static const unsigned int p4_escr_table[P4_ESCR_MSR_TABLE_SIZE] = {
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ALF_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_BSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR2),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR3),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR4),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_CRU_ESCR5),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_DAC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FIRM_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FLAME_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_FSB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IQ_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_ITLB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_IX_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MOB_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_MS_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_PMH_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_RAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SAAT_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_SSU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TBPU_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_TC_ESCR1),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR0),
	P4_ESCR_MSR_TABLE_ENTRY(MSR_P4_U2L_ESCR1),
};

static int p4_get_escr_idx(unsigned int addr)
{
	unsigned int idx = P4_ESCR_MSR_IDX(addr);

	if (unlikely(idx >= P4_ESCR_MSR_TABLE_SIZE ||
		     !p4_escr_table[idx])) {
		WARN_ONCE(1, "P4 PMU: Wrong address passed: %x\n", addr);
		return -1;
	}

	return idx;
}

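/*
 * Editor's note: a worked example of the "hashing" above, which is really
 * direct indexing by the MSR's offset from P4_ESCR_MSR_BASE, with unused
 * slots left zero (hence the ~70% fill). Using the constants just defined:
 *
 *	P4_ESCR_MSR_IDX(0x3a0) == 0x00	(first slot)
 *	P4_ESCR_MSR_IDX(0x3e1) == 0x41	(last slot; table size 0x42 == 66)
 *
 * so p4_get_escr_idx() costs one subtraction plus a bounds/zero check.
 */
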
static int p4_next_cntr(int thread, unsigned long *used_mask,
			struct p4_event_bind *bind)
{
	int i, j;

	for (i = 0; i < P4_CNTR_LIMIT; i++) {
		j = bind->cntr[thread][i];
		if (j != -1 && !test_bit(j, used_mask))
			return j;
	}

	return -1;
}

static int p4_pmu_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long escr_mask[BITS_TO_LONGS(P4_ESCR_MSR_TABLE_SIZE)];
	int cpu = raw_smp_processor_id();
	struct hw_perf_event *hwc;
	struct p4_event_bind *bind;
	unsigned int i, thread, num;
	int cntr_idx, escr_idx;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);
	bitmap_zero(escr_mask, P4_ESCR_MSR_TABLE_SIZE);

	for (i = 0, num = n; i < n; i++, num--) {

		hwc = &cpuc->event_list[i]->hw;
		thread = p4_ht_thread(cpu);
		bind = p4_config_get_bind(hwc->config);
		escr_idx = p4_get_escr_idx(bind->escr_msr[thread]);
		if (unlikely(escr_idx == -1))
			goto done;

		if (hwc->idx != -1 && !p4_should_swap_ts(hwc->config, cpu)) {
			cntr_idx = hwc->idx;
			if (assign)
				assign[i] = hwc->idx;
			goto reserve;
		}

		cntr_idx = p4_next_cntr(thread, used_mask, bind);
		if (cntr_idx == -1 || test_bit(escr_idx, escr_mask))
			goto done;

		p4_pmu_swap_config_ts(hwc, cpu);
		if (assign)
			assign[i] = cntr_idx;
reserve:
		set_bit(cntr_idx, used_mask);
		set_bit(escr_idx, escr_mask);
	}

done:
	return num ? -ENOSPC : 0;
}

static __initconst const struct x86_pmu p4_pmu = {
	.name			= "Netburst P4/Xeon",
	.handle_irq		= p4_pmu_handle_irq,
	.disable_all		= p4_pmu_disable_all,
	.enable_all		= p4_pmu_enable_all,
	.enable			= p4_pmu_enable_event,
	.disable		= p4_pmu_disable_event,
	.eventsel		= MSR_P4_BPU_CCCR0,
	.perfctr		= MSR_P4_BPU_PERFCTR0,
	.event_map		= p4_pmu_event_map,
	.max_events		= ARRAY_SIZE(p4_general_events),
	.get_event_constraints	= x86_get_event_constraints,
	/*
	 * If HT is disabled we may need to use all
	 * ARCH_P4_MAX_CCCR counters simultaneously,
	 * though we leave it restricted for the moment,
	 * assuming HT is on
	 */
	.num_counters		= ARCH_P4_MAX_CCCR,
	.apic			= 1,
	.cntval_bits		= 40,
	.cntval_mask		= (1ULL << 40) - 1,
	.max_period		= (1ULL << 39) - 1,
	.hw_config		= p4_hw_config,
	.schedule_events	= p4_pmu_schedule_events,
};

static __init int p4_pmu_init(void)
{
	unsigned int low, high;

	/* If this gets stripped, indexing fails */
	BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!(low & (1 << 7))) {
		pr_cont("unsupported Netburst CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	memcpy(hw_cache_event_ids, p4_hw_cache_event_ids,
		sizeof(hw_cache_event_ids));

	pr_cont("Netburst events, ");

	x86_pmu = p4_pmu;

	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */

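/*
 * Editor's note: a stand-alone sketch of the first-fit search performed by
 * p4_next_cntr() above: each event carries a short per-thread list of
 * counter candidates, terminated by -1, and scheduling picks the first one
 * not yet claimed. A plain byte array stands in for the kernel's bitmap
 * helpers; the function name and types are illustrative only.
 */
static int next_cntr(const char *candidates, int n, const unsigned char *used)
{
	int i;

	for (i = 0; i < n; i++) {
		int j = candidates[i];
		if (j != -1 && !used[j])
			return j;	/* first free candidate wins */
	}
	return -1;			/* no counter available: scheduling fails */
}
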

@@ -27,24 +27,6 @@ static u64 p6_pmu_event_map(int hw_event)
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}

static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */

@@ -66,7 +48,7 @@ static void p6_pmu_disable_all(void)
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void p6_pmu_enable_all(void)
static void p6_pmu_enable_all(int added)
{
	unsigned long val;

@@ -102,22 +84,23 @@ static void p6_pmu_enable_event(struct perf_event *event)
	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}

static __initconst struct x86_pmu p6_pmu = {
static __initconst const struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,
	.num_events		= 2,
	.num_counters		= 2,
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the

@@ -125,8 +108,8 @@ static __initconst struct x86_pmu p6_pmu = {
	 *
	 * See IA-32 Intel Architecture Software developer manual Vol 3B
	 */
	.event_bits		= 32,
	.event_mask		= (1ULL << 32) - 1,
	.cntval_bits		= 32,
	.cntval_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};

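/*
 * Editor's note: a small sketch of why cntval_bits is 32 above even though
 * the p6 counters implement 40 bits: since bits [32-39] mirror bit 31, a
 * raw 40-bit value collapses to a signed 32-bit quantity.
 */
static long long p6_sign_extend(unsigned long long raw40)
{
	/* move bit 31 up to the sign position, then arithmetic-shift back */
	return ((long long)(raw40 << 32)) >> 32;
}
/* p6_sign_extend(0x80000000ULL) == -2147483648LL, matching the hardware. */
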
1437	arch/x86/kernel/ds.c
(file diff suppressed because it is too large)

@@ -1,408 +0,0 @@
/*
 * Debug Store support - selftest
 *
 *
 * Copyright (C) 2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2009
 */

#include "ds_selftest.h"

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/ds.h>


#define BUFFER_SIZE		521	/* Intentionally chose an odd size. */
#define SMALL_BUFFER_SIZE	24	/* A single bts entry. */

struct ds_selftest_bts_conf {
	struct bts_tracer *tracer;
	int error;
	int (*suspend)(struct bts_tracer *);
	int (*resume)(struct bts_tracer *);
};

static int ds_selftest_bts_consistency(const struct bts_trace *trace)
{
	int error = 0;

	if (!trace) {
		printk(KERN_CONT "failed to access trace...");
		/* Bail out. Other tests are pointless. */
		return -1;
	}

	if (!trace->read) {
		printk(KERN_CONT "bts read not available...");
		error = -1;
	}

	/* Do some sanity checks on the trace configuration. */
	if (!trace->ds.n) {
		printk(KERN_CONT "empty bts buffer...");
		error = -1;
	}
	if (!trace->ds.size) {
		printk(KERN_CONT "bad bts trace setup...");
		error = -1;
	}
	if (trace->ds.end !=
	    (char *)trace->ds.begin + (trace->ds.n * trace->ds.size)) {
		printk(KERN_CONT "bad bts buffer setup...");
		error = -1;
	}
	/*
	 * We allow top in [begin; end], since it's not clear when the
	 * overflow adjustment happens: after the increment or before the
	 * write.
	 */
	if ((trace->ds.top < trace->ds.begin) ||
	    (trace->ds.end < trace->ds.top)) {
		printk(KERN_CONT "bts top out of bounds...");
		error = -1;
	}

	return error;
}

static int ds_selftest_bts_read(struct bts_tracer *tracer,
				const struct bts_trace *trace,
				const void *from, const void *to)
{
	const unsigned char *at;

	/*
	 * Check a few things which do not belong to this test.
	 * They should be covered by other tests.
	 */
	if (!trace)
		return -1;

	if (!trace->read)
		return -1;

	if (to < from)
		return -1;

	if (from < trace->ds.begin)
		return -1;

	if (trace->ds.end < to)
		return -1;

	if (!trace->ds.size)
		return -1;

	/* Now to the test itself. */
	for (at = from; (void *)at < to; at += trace->ds.size) {
		struct bts_struct bts;
		unsigned long index;
		int error;

		if (((void *)at - trace->ds.begin) % trace->ds.size) {
			printk(KERN_CONT
			       "read from non-integer index...");
			return -1;
		}
		index = ((void *)at - trace->ds.begin) / trace->ds.size;

		memset(&bts, 0, sizeof(bts));
		error = trace->read(tracer, at, &bts);
		if (error < 0) {
			printk(KERN_CONT
			       "error reading bts trace at [%lu] (0x%p)...",
			       index, at);
			return error;
		}

		switch (bts.qualifier) {
		case BTS_BRANCH:
			break;
		default:
			printk(KERN_CONT
			       "unexpected bts entry %llu at [%lu] (0x%p)...",
			       bts.qualifier, index, at);
			return -1;
		}
	}

	return 0;
}

static void ds_selftest_bts_cpu(void *arg)
{
	struct ds_selftest_bts_conf *conf = arg;
	const struct bts_trace *trace;
	void *top;

	if (IS_ERR(conf->tracer)) {
		conf->error = PTR_ERR(conf->tracer);
		conf->tracer = NULL;

		printk(KERN_CONT
		       "initialization failed (err: %d)...", conf->error);
		return;
	}

	/* We should meanwhile have enough trace. */
	conf->error = conf->suspend(conf->tracer);
	if (conf->error < 0)
		return;

	/* Let's see if we can access the trace. */
	trace = ds_read_bts(conf->tracer);

	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	/* If everything went well, we should have a few trace entries. */
	if (trace->ds.top == trace->ds.begin) {
		/*
		 * It is possible but highly unlikely that we got a
		 * buffer overflow and end up at exactly the same
		 * position we started from.
		 * Let's issue a warning, but continue.
		 */
		printk(KERN_CONT "no trace/overflow...");
	}

	/* Let's try to read the trace we collected. */
	conf->error =
		ds_selftest_bts_read(conf->tracer, trace,
				     trace->ds.begin, trace->ds.top);
	if (conf->error < 0)
		return;

	/*
	 * Let's read the trace again.
	 * Since we suspended tracing, we should get the same result.
	 */
	top = trace->ds.top;

	trace = ds_read_bts(conf->tracer);
	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	if (top != trace->ds.top) {
		printk(KERN_CONT "suspend not working...");
		conf->error = -1;
		return;
	}

	/* Let's collect some more trace - see if resume is working. */
	conf->error = conf->resume(conf->tracer);
	if (conf->error < 0)
		return;

	conf->error = conf->suspend(conf->tracer);
	if (conf->error < 0)
		return;

	trace = ds_read_bts(conf->tracer);

	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	if (trace->ds.top == top) {
		/*
		 * It is possible but highly unlikely that we got a
		 * buffer overflow and end up at exactly the same
		 * position we started from.
		 * Let's issue a warning and check the full trace.
		 */
		printk(KERN_CONT
		       "no resume progress/overflow...");

		conf->error =
			ds_selftest_bts_read(conf->tracer, trace,
					     trace->ds.begin, trace->ds.end);
	} else if (trace->ds.top < top) {
		/*
		 * We had a buffer overflow - the entire buffer should
		 * contain trace records.
		 */
		conf->error =
			ds_selftest_bts_read(conf->tracer, trace,
					     trace->ds.begin, trace->ds.end);
	} else {
		/*
		 * It is quite likely that the buffer did not overflow.
		 * Let's just check the delta trace.
		 */
		conf->error =
			ds_selftest_bts_read(conf->tracer, trace, top,
					     trace->ds.top);
	}
	if (conf->error < 0)
		return;

	conf->error = 0;
}

static int ds_suspend_bts_wrap(struct bts_tracer *tracer)
{
	ds_suspend_bts(tracer);
	return 0;
}

static int ds_resume_bts_wrap(struct bts_tracer *tracer)
{
	ds_resume_bts(tracer);
	return 0;
}

static void ds_release_bts_noirq_wrap(void *tracer)
{
	(void)ds_release_bts_noirq(tracer);
}

static int ds_selftest_bts_bad_release_noirq(int cpu,
					     struct bts_tracer *tracer)
{
	int error = -EPERM;

	/* Try to release the tracer on the wrong cpu. */
	get_cpu();
	if (cpu != smp_processor_id()) {
		error = ds_release_bts_noirq(tracer);
		if (error != -EPERM)
			printk(KERN_CONT "release on wrong cpu...");
	}
	put_cpu();

	return error ? 0 : -1;
}

static int ds_selftest_bts_bad_request_cpu(int cpu, void *buffer)
{
	struct bts_tracer *tracer;
	int error;

	/* Try to request cpu tracing while task tracing is active. */
	tracer = ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE, NULL,
				    (size_t)-1, BTS_KERNEL);
	error = PTR_ERR(tracer);
	if (!IS_ERR(tracer)) {
		ds_release_bts(tracer);
		error = 0;
	}

	if (error != -EPERM)
		printk(KERN_CONT "cpu/task tracing overlap...");

	return error ? 0 : -1;
}

static int ds_selftest_bts_bad_request_task(void *buffer)
{
	struct bts_tracer *tracer;
	int error;

	/* Try to request task tracing while cpu tracing is active. */
	tracer = ds_request_bts_task(current, buffer, BUFFER_SIZE, NULL,
				     (size_t)-1, BTS_KERNEL);
	error = PTR_ERR(tracer);
	if (!IS_ERR(tracer)) {
		error = 0;
		ds_release_bts(tracer);
	}

	if (error != -EPERM)
		printk(KERN_CONT "task/cpu tracing overlap...");

	return error ? 0 : -1;
}

int ds_selftest_bts(void)
{
	struct ds_selftest_bts_conf conf;
	unsigned char buffer[BUFFER_SIZE], *small_buffer;
	unsigned long irq;
	int cpu;

	printk(KERN_INFO "[ds] bts selftest...");
	conf.error = 0;

	small_buffer = (unsigned char *)ALIGN((unsigned long)buffer, 8) + 8;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		conf.suspend = ds_suspend_bts_wrap;
		conf.resume = ds_resume_bts_wrap;
		conf.tracer =
			ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
					   NULL, (size_t)-1, BTS_KERNEL);
		ds_selftest_bts_cpu(&conf);
		if (conf.error >= 0)
			conf.error = ds_selftest_bts_bad_request_task(buffer);
		ds_release_bts(conf.tracer);
		if (conf.error < 0)
			goto out;

		conf.suspend = ds_suspend_bts_noirq;
		conf.resume = ds_resume_bts_noirq;
		conf.tracer =
			ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
					   NULL, (size_t)-1, BTS_KERNEL);
		smp_call_function_single(cpu, ds_selftest_bts_cpu, &conf, 1);
		if (conf.error >= 0) {
			conf.error =
				ds_selftest_bts_bad_release_noirq(cpu,
								  conf.tracer);
			/* We must not release the tracer twice. */
			if (conf.error < 0)
				conf.tracer = NULL;
		}
		if (conf.error >= 0)
			conf.error = ds_selftest_bts_bad_request_task(buffer);
		smp_call_function_single(cpu, ds_release_bts_noirq_wrap,
					 conf.tracer, 1);
		if (conf.error < 0)
			goto out;
	}

	conf.suspend = ds_suspend_bts_wrap;
	conf.resume = ds_resume_bts_wrap;
	conf.tracer =
		ds_request_bts_task(current, buffer, BUFFER_SIZE,
				    NULL, (size_t)-1, BTS_KERNEL);
	ds_selftest_bts_cpu(&conf);
	if (conf.error >= 0)
		conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
	ds_release_bts(conf.tracer);
	if (conf.error < 0)
		goto out;

	conf.suspend = ds_suspend_bts_noirq;
	conf.resume = ds_resume_bts_noirq;
	conf.tracer =
		ds_request_bts_task(current, small_buffer, SMALL_BUFFER_SIZE,
				    NULL, (size_t)-1, BTS_KERNEL);
	local_irq_save(irq);
	ds_selftest_bts_cpu(&conf);
	if (conf.error >= 0)
		conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
	ds_release_bts_noirq(conf.tracer);
	local_irq_restore(irq);
	if (conf.error < 0)
		goto out;

	conf.error = 0;
out:
	put_online_cpus();
	printk(KERN_CONT "%s.\n", (conf.error ? "failed" : "passed"));

	return conf.error;
}

int ds_selftest_pebs(void)
{
	return 0;
}

@@ -1,15 +0,0 @@
/*
 * Debug Store support - selftest
 *
 *
 * Copyright (C) 2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2009
 */

#ifdef CONFIG_X86_DS_SELFTEST
extern int ds_selftest_bts(void);
extern int ds_selftest_pebs(void);
#else
static inline int ds_selftest_bts(void) { return 0; }
static inline int ds_selftest_pebs(void) { return 0; }
#endif

@@ -224,11 +224,6 @@ unsigned __kprobes long oops_begin(void)
	int cpu;
	unsigned long flags;

	/* notify the hw-branch tracer so it may disable tracing and
	   add the last trace to the trace buffer -
	   the earlier this happens, the more useful the trace. */
	trace_hw_branch_oops();

	oops_enter();

	/* racy, but better than risking deadlock. */

@@ -188,26 +188,17 @@ static int get_hbp_len(u8 hbp_len)
	return len_in_bytes;
}

/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va <= TASK_SIZE - len);
}

/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	len = get_hbp_len(hbp_len);
	va = info->address;
	len = get_hbp_len(info->len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

@@ -300,8 +291,7 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;

@@ -314,16 +304,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,

	ret = -EINVAL;

	if (info->type == X86_BREAKPOINT_EXECUTE)
		/*
		 * Ptrace-refactoring code
		 * For now, we'll allow instruction breakpoint only for user-space
		 * addresses
		 */
		if ((!arch_check_va_in_userspace(info->address, info->len)) &&
			info->len != X86_BREAKPOINT_EXECUTE)
			return ret;

	switch (info->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;

@@ -350,15 +330,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}

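/*
 * Editor's note: a worked example of the new kernel-space predicate above,
 * (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE). The second clause
 * matters when va + len wraps past the top of the address space: both the
 * start and the end of the breakpoint range must sit above TASK_SIZE.
 * The demo types and DEMO_TASK_SIZE are made-up stand-ins.
 */
typedef unsigned int demo_va;		/* 32-bit addresses for the example */
#define DEMO_TASK_SIZE 0xc0000000U

static int demo_in_kernelspace(demo_va va, demo_va len)
{
	return (va >= DEMO_TASK_SIZE) && ((va + len - 1) >= DEMO_TASK_SIZE);
}
/* demo_in_kernelspace(0xffffffffU, 4) == 0: va + len - 1 wraps to 2. */
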
@@ -422,14 +422,22 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,

static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(0);
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_DEBUGCTLMSR))
		update_debugctlmsr(current->thread.debugctlmsr);
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,

@@ -20,7 +20,6 @@
 #include <asm/idle.h>
 #include <asm/uaccess.h>
 #include <asm/i387.h>
-#include <asm/ds.h>
 #include <asm/debugreg.h>
 
 unsigned long idle_halt;
 
@@ -50,8 +49,6 @@ void free_thread_xstate(struct task_struct *tsk)
 		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
 		tsk->thread.xstate = NULL;
 	}
-
-	WARN(tsk->thread.ds_ctx, "leaking DS context\n");
 }
 
 void free_thread_info(struct thread_info *ti)
 
@@ -198,11 +195,16 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	prev = &prev_p->thread;
 	next = &next_p->thread;
 
-	if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-	    test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-		ds_switch_to(prev_p, next_p);
-	else if (next->debugctlmsr != prev->debugctlmsr)
-		update_debugctlmsr(next->debugctlmsr);
+	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
+	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
+
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
+			debugctl |= DEBUGCTLMSR_BTF;
+
+		update_debugctlmsr(debugctl);
+	}
 
 	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
 	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
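Worth spelling out the XOR idiom the new __switch_to_xtra() code uses (the same test the surrounding TIF_NOTSC code already used): the MSR is only touched when the outgoing and incoming tasks disagree about block stepping. A minimal demonstration of the truth table, with plain ints standing in for test_tsk_thread_flag():

#include <stdio.h>

int main(void)
{
	int combos[4][2] = { {0, 0}, {0, 1}, {1, 0}, {1, 1} };
	int i;

	for (i = 0; i < 4; i++) {
		int prev = combos[i][0], next = combos[i][1];

		/* The MSR write happens only when the flags differ,
		 * exactly the prev ^ next test in the hunk above. */
		if (prev ^ next)
			printf("prev=%d next=%d -> write MSR\n", prev, next);
		else
			printf("prev=%d next=%d -> skip\n", prev, next);
	}
	return 0;
}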
@@ -55,7 +55,6 @@
 #include <asm/cpu.h>
 #include <asm/idle.h>
 #include <asm/syscalls.h>
-#include <asm/ds.h>
 #include <asm/debugreg.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
@@ -238,13 +237,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		kfree(p->thread.io_bitmap_ptr);
 		p->thread.io_bitmap_max = 0;
 	}
-
-	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
-	p->thread.ds_ctx = NULL;
-
-	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
-	p->thread.debugctlmsr = 0;
-
 	return err;
 }
@@ -49,7 +49,6 @@
 #include <asm/ia32.h>
 #include <asm/idle.h>
 #include <asm/syscalls.h>
-#include <asm/ds.h>
 #include <asm/debugreg.h>
 
 asmlinkage extern void ret_from_fork(void);
 
@@ -313,13 +312,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		if (err)
 			goto out;
 	}
-
-	clear_tsk_thread_flag(p, TIF_DS_AREA_MSR);
-	p->thread.ds_ctx = NULL;
-
-	clear_tsk_thread_flag(p, TIF_DEBUGCTLMSR);
-	p->thread.debugctlmsr = 0;
-
 	err = 0;
 out:
 	if (err && p->thread.io_bitmap_ptr) {
@@ -2,9 +2,6 @@
 /*
  * Pentium III FXSR, SSE support
  * Gareth Hughes <gareth@valinux.com>, May 2000
- *
- * BTS tracing
- * Markus Metzger <markus.t.metzger@intel.com>, Dec 2007
  */
 
 #include <linux/kernel.h>
 
@@ -22,7 +19,6 @@
 #include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/signal.h>
-#include <linux/workqueue.h>
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 
@@ -36,7 +32,6 @@
 #include <asm/desc.h>
 #include <asm/prctl.h>
 #include <asm/proto.h>
-#include <asm/ds.h>
 #include <asm/hw_breakpoint.h>
 
 #include "tls.h"
 
@@ -693,7 +688,7 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
 	struct perf_event_attr attr;
 
 	if (!t->ptrace_bps[nr]) {
-		hw_breakpoint_init(&attr);
+		ptrace_breakpoint_init(&attr);
 		/*
 		 * Put stub len and type to register (reserve) an inactive but
 		 * correct bp
@@ -789,342 +784,6 @@ static int ioperm_get(struct task_struct *target,
 				   0, IO_BITMAP_BYTES);
 }
 
-#ifdef CONFIG_X86_PTRACE_BTS
-/*
- * A branch trace store context.
- *
- * Contexts may only be installed by ptrace_bts_config() and only for
- * ptraced tasks.
- *
- * Contexts are destroyed when the tracee is detached from the tracer.
- * The actual destruction work requires interrupts enabled, so the
- * work is deferred and will be scheduled during __ptrace_unlink().
- *
- * Contexts hold an additional task_struct reference on the traced
- * task, as well as a reference on the tracer's mm.
- *
- * Ptrace already holds a task_struct for the duration of ptrace operations,
- * but since destruction is deferred, it may be executed after both
- * tracer and tracee exited.
- */
-struct bts_context {
-	/* The branch trace handle. */
-	struct bts_tracer *tracer;
-
-	/* The buffer used to store the branch trace and its size. */
-	void *buffer;
-	unsigned int size;
-
-	/* The mm that paid for the above buffer. */
-	struct mm_struct *mm;
-
-	/* The task this context belongs to. */
-	struct task_struct *task;
-
-	/* The signal to send on a bts buffer overflow. */
-	unsigned int bts_ovfl_signal;
-
-	/* The work struct to destroy a context. */
-	struct work_struct work;
-};
-
-static int alloc_bts_buffer(struct bts_context *context, unsigned int size)
-{
-	void *buffer = NULL;
-	int err = -ENOMEM;
-
-	err = account_locked_memory(current->mm, current->signal->rlim, size);
-	if (err < 0)
-		return err;
-
-	buffer = kzalloc(size, GFP_KERNEL);
-	if (!buffer)
-		goto out_refund;
-
-	context->buffer = buffer;
-	context->size = size;
-	context->mm = get_task_mm(current);
-
-	return 0;
-
- out_refund:
-	refund_locked_memory(current->mm, size);
-	return err;
-}
-
-static inline void free_bts_buffer(struct bts_context *context)
-{
-	if (!context->buffer)
-		return;
-
-	kfree(context->buffer);
-	context->buffer = NULL;
-
-	refund_locked_memory(context->mm, context->size);
-	context->size = 0;
-
-	mmput(context->mm);
-	context->mm = NULL;
-}
-
-static void free_bts_context_work(struct work_struct *w)
-{
-	struct bts_context *context;
-
-	context = container_of(w, struct bts_context, work);
-
-	ds_release_bts(context->tracer);
-	put_task_struct(context->task);
-	free_bts_buffer(context);
-	kfree(context);
-}
-
-static inline void free_bts_context(struct bts_context *context)
-{
-	INIT_WORK(&context->work, free_bts_context_work);
-	schedule_work(&context->work);
-}
-
-static inline struct bts_context *alloc_bts_context(struct task_struct *task)
-{
-	struct bts_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
-	if (context) {
-		context->task = task;
-		task->bts = context;
-
-		get_task_struct(task);
-	}
-
-	return context;
-}
-
-static int ptrace_bts_read_record(struct task_struct *child, size_t index,
-				  struct bts_struct __user *out)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-	struct bts_struct bts;
-	const unsigned char *at;
-	int error;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	at = trace->ds.top - ((index + 1) * trace->ds.size);
-	if ((void *)at < trace->ds.begin)
-		at += (trace->ds.n * trace->ds.size);
-
-	if (!trace->read)
-		return -EOPNOTSUPP;
-
-	error = trace->read(context->tracer, at, &bts);
-	if (error < 0)
-		return error;
-
-	if (copy_to_user(out, &bts, sizeof(bts)))
-		return -EFAULT;
-
-	return sizeof(bts);
-}
-
-static int ptrace_bts_drain(struct task_struct *child,
-			    long size,
-			    struct bts_struct __user *out)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-	const unsigned char *at;
-	int error, drained = 0;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	if (!trace->read)
-		return -EOPNOTSUPP;
-
-	if (size < (trace->ds.top - trace->ds.begin))
-		return -EIO;
-
-	for (at = trace->ds.begin; (void *)at < trace->ds.top;
-	     out++, drained++, at += trace->ds.size) {
-		struct bts_struct bts;
-
-		error = trace->read(context->tracer, at, &bts);
-		if (error < 0)
-			return error;
-
-		if (copy_to_user(out, &bts, sizeof(bts)))
-			return -EFAULT;
-	}
-
-	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
-	error = ds_reset_bts(context->tracer);
-	if (error < 0)
-		return error;
-
-	return drained;
-}
-
-static int ptrace_bts_config(struct task_struct *child,
-			     long cfg_size,
-			     const struct ptrace_bts_config __user *ucfg)
-{
-	struct bts_context *context;
-	struct ptrace_bts_config cfg;
-	unsigned int flags = 0;
-
-	if (cfg_size < sizeof(cfg))
-		return -EIO;
-
-	if (copy_from_user(&cfg, ucfg, sizeof(cfg)))
-		return -EFAULT;
-
-	context = child->bts;
-	if (!context)
-		context = alloc_bts_context(child);
-	if (!context)
-		return -ENOMEM;
-
-	if (cfg.flags & PTRACE_BTS_O_SIGNAL) {
-		if (!cfg.signal)
-			return -EINVAL;
-
-		return -EOPNOTSUPP;
-		context->bts_ovfl_signal = cfg.signal;
-	}
-
-	ds_release_bts(context->tracer);
-	context->tracer = NULL;
-
-	if ((cfg.flags & PTRACE_BTS_O_ALLOC) && (cfg.size != context->size)) {
-		int err;
-
-		free_bts_buffer(context);
-		if (!cfg.size)
-			return 0;
-
-		err = alloc_bts_buffer(context, cfg.size);
-		if (err < 0)
-			return err;
-	}
-
-	if (cfg.flags & PTRACE_BTS_O_TRACE)
-		flags |= BTS_USER;
-
-	if (cfg.flags & PTRACE_BTS_O_SCHED)
-		flags |= BTS_TIMESTAMPS;
-
-	context->tracer =
-		ds_request_bts_task(child, context->buffer, context->size,
-				    NULL, (size_t)-1, flags);
-	if (unlikely(IS_ERR(context->tracer))) {
-		int error = PTR_ERR(context->tracer);
-
-		free_bts_buffer(context);
-		context->tracer = NULL;
-		return error;
-	}
-
-	return sizeof(cfg);
-}
-
-static int ptrace_bts_status(struct task_struct *child,
-			     long cfg_size,
-			     struct ptrace_bts_config __user *ucfg)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-	struct ptrace_bts_config cfg;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	if (cfg_size < sizeof(cfg))
-		return -EIO;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	memset(&cfg, 0, sizeof(cfg));
-	cfg.size = trace->ds.end - trace->ds.begin;
-	cfg.signal = context->bts_ovfl_signal;
-	cfg.bts_size = sizeof(struct bts_struct);
-
-	if (cfg.signal)
-		cfg.flags |= PTRACE_BTS_O_SIGNAL;
-
-	if (trace->ds.flags & BTS_USER)
-		cfg.flags |= PTRACE_BTS_O_TRACE;
-
-	if (trace->ds.flags & BTS_TIMESTAMPS)
-		cfg.flags |= PTRACE_BTS_O_SCHED;
-
-	if (copy_to_user(ucfg, &cfg, sizeof(cfg)))
-		return -EFAULT;
-
-	return sizeof(cfg);
-}
-
-static int ptrace_bts_clear(struct task_struct *child)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	memset(trace->ds.begin, 0, trace->ds.n * trace->ds.size);
-
-	return ds_reset_bts(context->tracer);
-}
-
-static int ptrace_bts_size(struct task_struct *child)
-{
-	struct bts_context *context;
-	const struct bts_trace *trace;
-
-	context = child->bts;
-	if (!context)
-		return -ESRCH;
-
-	trace = ds_read_bts(context->tracer);
-	if (!trace)
-		return -ESRCH;
-
-	return (trace->ds.top - trace->ds.begin) / trace->ds.size;
-}
-
-/*
- * Called from __ptrace_unlink() after the child has been moved back
- * to its original parent.
- */
-void ptrace_bts_untrace(struct task_struct *child)
-{
-	if (unlikely(child->bts)) {
-		free_bts_context(child->bts);
-		child->bts = NULL;
-	}
-}
-#endif /* CONFIG_X86_PTRACE_BTS */
-
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -1252,39 +911,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 		break;
 #endif
 
-	/*
-	 * These bits need more cooking - not enabled yet:
-	 */
-#ifdef CONFIG_X86_PTRACE_BTS
-	case PTRACE_BTS_CONFIG:
-		ret = ptrace_bts_config
-			(child, data, (struct ptrace_bts_config __user *)addr);
-		break;
-
-	case PTRACE_BTS_STATUS:
-		ret = ptrace_bts_status
-			(child, data, (struct ptrace_bts_config __user *)addr);
-		break;
-
-	case PTRACE_BTS_SIZE:
-		ret = ptrace_bts_size(child);
-		break;
-
-	case PTRACE_BTS_GET:
-		ret = ptrace_bts_read_record
-			(child, data, (struct bts_struct __user *) addr);
-		break;
-
-	case PTRACE_BTS_CLEAR:
-		ret = ptrace_bts_clear(child);
-		break;
-
-	case PTRACE_BTS_DRAIN:
-		ret = ptrace_bts_drain
-			(child, data, (struct bts_struct __user *) addr);
-		break;
-#endif /* CONFIG_X86_PTRACE_BTS */
-
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
@@ -1544,14 +1170,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 	case PTRACE_GET_THREAD_AREA:
 	case PTRACE_SET_THREAD_AREA:
-#ifdef CONFIG_X86_PTRACE_BTS
-	case PTRACE_BTS_CONFIG:
-	case PTRACE_BTS_STATUS:
-	case PTRACE_BTS_SIZE:
-	case PTRACE_BTS_GET:
-	case PTRACE_BTS_CLEAR:
-	case PTRACE_BTS_DRAIN:
-#endif /* CONFIG_X86_PTRACE_BTS */
 		return arch_ptrace(child, request, addr, data);
 
 	default:
@@ -157,22 +157,6 @@ static int enable_single_step(struct task_struct *child)
 	return 1;
 }
 
-/*
- * Install this value in MSR_IA32_DEBUGCTLMSR whenever child is running.
- */
-static void write_debugctlmsr(struct task_struct *child, unsigned long val)
-{
-	if (child->thread.debugctlmsr == val)
-		return;
-
-	child->thread.debugctlmsr = val;
-
-	if (child != current)
-		return;
-
-	update_debugctlmsr(val);
-}
-
 /*
  * Enable single or block step.
  */
 
@@ -186,15 +170,17 @@ static void enable_step(struct task_struct *child, bool block)
 	 * that uses user-mode single stepping itself.
 	 */
 	if (enable_single_step(child) && block) {
-		set_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
-		write_debugctlmsr(child,
-				  child->thread.debugctlmsr | DEBUGCTLMSR_BTF);
-	} else {
-		write_debugctlmsr(child,
-				  child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
+		unsigned long debugctl = get_debugctlmsr();
 
-		if (!child->thread.debugctlmsr)
-			clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+		debugctl |= DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+		set_tsk_thread_flag(child, TIF_BLOCKSTEP);
+	} else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
+
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+		clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
 	}
 }
 
@@ -213,11 +199,13 @@ void user_disable_single_step(struct task_struct *child)
 	/*
 	 * Make sure block stepping (BTF) is disabled.
 	 */
-	write_debugctlmsr(child,
-			  child->thread.debugctlmsr & ~DEBUGCTLMSR_BTF);
+	if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) {
+		unsigned long debugctl = get_debugctlmsr();
 
-	if (!child->thread.debugctlmsr)
-		clear_tsk_thread_flag(child, TIF_DEBUGCTLMSR);
+		debugctl &= ~DEBUGCTLMSR_BTF;
+		update_debugctlmsr(debugctl);
+		clear_tsk_thread_flag(child, TIF_BLOCKSTEP);
+	}
 
 	/* Always clear TIF_SINGLESTEP... */
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
@@ -543,11 +543,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 
 	/* DR6 may or may not be cleared by the CPU */
 	set_debugreg(0, 6);
 
 	/*
 	 * The processor cleared BTF, so don't mark that we need it set.
 	 */
-	clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR);
-	tsk->thread.debugctlmsr = 0;
+	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);
 
 	/* Store the virtualized DR6 value */
 	tsk->thread.debugreg6 = dr6;
@@ -3660,8 +3660,11 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 
 	/* We need to handle NMIs before interrupts are enabled */
 	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-	    (exit_intr_info & INTR_INFO_VALID_MASK))
+	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
+		kvm_before_handle_nmi(&vmx->vcpu);
 		asm("int $2");
+		kvm_after_handle_nmi(&vmx->vcpu);
+	}
 
 	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
@@ -40,6 +40,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
+#include <linux/perf_event.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
 
@@ -3743,6 +3744,51 @@ static void kvm_timer_init(void)
 	}
 }
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
+
+static int kvm_is_in_guest(void)
+{
+	return percpu_read(current_vcpu) != NULL;
+}
+
+static int kvm_is_user_mode(void)
+{
+	int user_mode = 3;
+
+	if (percpu_read(current_vcpu))
+		user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+
+	return user_mode != 0;
+}
+
+static unsigned long kvm_get_guest_ip(void)
+{
+	unsigned long ip = 0;
+
+	if (percpu_read(current_vcpu))
+		ip = kvm_rip_read(percpu_read(current_vcpu));
+
+	return ip;
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+	.is_in_guest = kvm_is_in_guest,
+	.is_user_mode = kvm_is_user_mode,
+	.get_guest_ip = kvm_get_guest_ip,
+};
+
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+{
+	percpu_write(current_vcpu, vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+{
+	percpu_write(current_vcpu, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+
 int kvm_arch_init(void *opaque)
 {
 	int r;
 
@@ -3779,6 +3825,8 @@ int kvm_arch_init(void *opaque)
 
 	kvm_timer_init();
 
+	perf_register_guest_info_callbacks(&kvm_guest_cbs);
+
 	return 0;
 
 out:
 
@@ -3787,6 +3835,8 @@ int kvm_arch_init(void *opaque)
 
 void kvm_arch_exit(void)
 {
+	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
 					    CPUFREQ_TRANSITION_NOTIFIER);
@@ -65,4 +65,7 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return kvm_read_cr0_bits(vcpu, X86_CR0_PG);
 }
 
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+
 #endif
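The three KVM hunks above fit together: the vmx exit path brackets the reflected NMI with kvm_before_handle_nmi()/kvm_after_handle_nmi(), which maintain a per-cpu current_vcpu pointer, and the callback table registered with perf lets a profiler's NMI handler attribute the sample to the guest instead of the host. The following is a standalone userspace mock of that consumer side; the struct layout is copied from the hunk, while the mock vcpu state and sample values are invented for illustration.

#include <stdio.h>

struct perf_guest_info_callbacks {
	int (*is_in_guest)(void);
	int (*is_user_mode)(void);
	unsigned long (*get_guest_ip)(void);
};

static int in_guest;                    /* stand-in for percpu current_vcpu */
static unsigned long current_vcpu_rip;  /* stand-in for kvm_rip_read() */

static int mock_is_in_guest(void) { return in_guest; }
static int mock_is_user_mode(void) { return 0; } /* CPL 0 = guest kernel */
static unsigned long mock_get_guest_ip(void) { return current_vcpu_rip; }

static struct perf_guest_info_callbacks cbs = {
	.is_in_guest = mock_is_in_guest,
	.is_user_mode = mock_is_user_mode,
	.get_guest_ip = mock_get_guest_ip,
};

/* What a profiler NMI handler can now do with the registered table. */
int main(void)
{
	in_guest = 1;
	current_vcpu_rip = 0xc0100000UL;

	if (cbs.is_in_guest())
		printf("sample ip = %#lx (guest, %s mode)\n",
		       cbs.get_guest_ip(),
		       cbs.is_user_mode() ? "user" : "kernel");
	return 0;
}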
@@ -20,7 +20,7 @@ lib-y := delay.o
 lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_KPROBES) += insn.o inat.o
+lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o
@@ -60,7 +60,7 @@ ENTRY(call_rwsem_down_write_failed)
 ENDPROC(call_rwsem_down_write_failed)
 
 ENTRY(call_rwsem_wake)
-	decw %dx    /* do nothing if still outstanding active readers */
+	decl %edx   /* do nothing if still outstanding active readers */
 	jnz 1f
 	save_common_regs
 	movq %rax,%rdi
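The decw-to-decl change above matters because the jnz that follows tests the flags set by the decrement: a 16-bit decw only sees the low half of the count, so it can report zero while the full 32-bit value is nonzero. The exact rwsem count layout is beside the point; the sketch below just demonstrates the hazard with a value whose low 16 bits are 1.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t count = 0x00020001; /* readers tracked above bit 15 too */

	uint16_t low16 = (uint16_t)(count - 1); /* what decw's ZF would see */
	uint32_t full = count - 1;              /* what decl's ZF sees */

	printf("decw view: %#06x -> branch as if no active readers\n",
	       (unsigned)low16);                /* 0x0000: wrong */
	printf("decl view: %#010x -> readers still present\n",
	       (unsigned)full);                 /* 0x00020000: correct */
	return 0;
}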
@@ -448,6 +448,20 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 
+void __init fixup_early_ioremap(void)
+{
+	int i;
+
+	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+		if (prev_map[i]) {
+			WARN_ON(1);
+			break;
+		}
+	}
+
+	early_ioremap_init();
+}
+
 static int __init check_early_ioremap_leak(void)
 {
 	int count = 0;
@@ -18,6 +18,7 @@
 #include <asm/e820.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
@@ -128,6 +129,7 @@ static int __init parse_reservetop(char *arg)
 
 	address = memparse(arg, &arg);
 	reserve_top_address(address);
+	fixup_early_ioremap();
 	return 0;
 }
 early_param("reservetop", parse_reservetop);
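These two hunks pair up: the "reservetop" early parameter lowers the fixmap top, so any early-ioremap slot addresses derived from the old top must be recomputed, which is all fixup_early_ioremap() does (after warning if a slot is still live). A toy model of the dependency, with invented addresses and a four-slot table standing in for the real fixmap:

#include <stdio.h>

static unsigned long fixaddr_top = 0xfffff000UL;
#define SLOT_ADDR(i) (fixaddr_top - ((i) + 1) * 0x1000UL)

static unsigned long slot_addr_cache[4];

static void early_ioremap_init(void) /* recomputes slot addresses */
{
	int i;

	for (i = 0; i < 4; i++)
		slot_addr_cache[i] = SLOT_ADDR(i);
}

int main(void)
{
	early_ioremap_init();
	printf("slot0 before: %#lx\n", slot_addr_cache[0]);

	/* reserve_top_address() analogue: lower the top... */
	fixaddr_top -= 0x100000UL;
	/* ...then, like fixup_early_ioremap(), rebuild what was
	 * derived from the old top. */
	early_ioremap_init();
	printf("slot0 after:  %#lx\n", slot_addr_cache[0]);
	return 0;
}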
|
@ -239,11 +239,11 @@ static void arch_perfmon_setup_counters(void)
|
|||
if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
|
||||
current_cpu_data.x86_model == 15) {
|
||||
eax.split.version_id = 2;
|
||||
eax.split.num_events = 2;
|
||||
eax.split.num_counters = 2;
|
||||
eax.split.bit_width = 40;
|
||||
}
|
||||
|
||||
num_counters = eax.split.num_events;
|
||||
num_counters = eax.split.num_counters;
|
||||
|
||||
op_arch_perfmon_spec.num_counters = num_counters;
|
||||
op_arch_perfmon_spec.num_controls = num_counters;
|
||||
|
|
|
@@ -324,6 +324,7 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	void *scribble = submit->scribble;
 	int non_zero_srcs, i;
 
 	BUG_ON(faila == failb);
 
@@ -332,11 +333,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!submit->scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
 
@@ -406,11 +409,13 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
 
-	/* we need to preserve the contents of 'blocks' for the async
-	 * case, so punt to synchronous if a scribble buffer is not available
+	/* if a dma resource is not available or a scribble buffer is not
+	 * available punt to the synchronous path.  In the 'dma not
+	 * available' case be sure to use the scribble buffer to
+	 * preserve the content of 'blocks' as the caller intended.
 	 */
-	if (!scribble) {
-		void **ptrs = (void **) blocks;
+	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
+		void **ptrs = scribble ? scribble : (void **) blocks;
 
 	async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
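Both recovery hunks make the same decision, so it is worth isolating: punt to the synchronous path when either the DMA channel or the scribble buffer is missing, and prefer the scribble buffer so the caller's 'blocks' array is not clobbered. A small sketch of just that branch; have_dma_channel() and the pointer values are hypothetical stand-ins for async_dma_find_channel(DMA_PQ) and the real buffers.

#include <stdio.h>

static int have_dma_channel(void) { return 0; } /* pretend: no DMA engine */

int main(void)
{
	void *scribble = (void *)0x1000; /* pretend a scribble buffer exists */
	void *blocks   = (void *)0x2000; /* caller-owned source pointers */

	if (!have_dma_channel() || !scribble) {
		/* Synchronous path: scratch in scribble when we have it,
		 * so 'blocks' survives for the caller. */
		void *ptrs = scribble ? scribble : blocks;

		printf("sync path, ptrs=%p (blocks preserved: %s)\n",
		       ptrs, scribble ? "yes" : "no");
	} else {
		printf("async path via DMA engine\n");
	}
	return 0;
}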
@@ -424,7 +424,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID12("Hyperstone", "Model1", 0x3d5b9ef5, 0xca6ab420),
 	PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
 	PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
-	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x3e520e17),
+	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 1GB", 0x2e6d1829, 0x55d5bffb),
 	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF CARD 4GB", 0x2e6d1829, 0x531e7d10),
 	PCMCIA_DEVICE_PROD_ID12("KINGSTON", "CF8GB", 0x2e6d1829, 0xacbe682e),
 	PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
 
@@ -446,7 +446,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
-	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x9351e59d),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF133", 0x709b1bf1, 0x7558f133),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS8GCF133", 0x709b1bf1, 0xb2f89b47),
 	PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
 	PCMCIA_DEVICE_PROD_ID12("WEIDA", "TWTTI", 0xcc7cf69c, 0x212bb918),
@@ -879,8 +879,8 @@ static int isicom_open(struct tty_struct *tty, struct file *filp)
 	if (tport == NULL)
 		return -ENODEV;
 	port = container_of(tport, struct isi_port, port);
 	card = &isi_card[BOARD(tty->index)];
 
+	tty->driver_data = port;
 	return tty_port_open(tport, tty, filp);
 }
 
@@ -936,7 +936,12 @@ static void isicom_shutdown(struct tty_port *port)
 static void isicom_close(struct tty_struct *tty, struct file *filp)
 {
 	struct isi_port *ip = tty->driver_data;
-	struct tty_port *port = &ip->port;
+	struct tty_port *port;
+
+	if (ip == NULL)
+		return;
+
+	port = &ip->port;
 	if (isicom_paranoia_check(ip, tty->name, "isicom_close"))
 		return;
 	tty_port_close(port, tty, filp);
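The isicom hunks above, and the istallion, mxser, riscom8 and stallion hunks that follow, all apply one pattern: open() must publish tty->driver_data before handing off to tty_port_open(), and close() must tolerate a NULL driver_data left behind by an open() that failed early. A minimal sketch of the pattern with skeleton types standing in for struct tty_struct and the driver's port:

#include <stdio.h>

struct port { int id; };
struct tty { void *driver_data; };

static int port_open(struct tty *tty, struct port *p)
{
	/* Publish driver_data before the call that can fail, as the
	 * hunks do, so later callbacks can find the port. */
	tty->driver_data = p;
	return 0; /* tty_port_open() stand-in */
}

static void port_close(struct tty *tty)
{
	struct port *p = tty->driver_data;

	/* Guard modeled on isicom_close()/stl_close(): driver_data may
	 * still be NULL if open() never got far enough to set it. */
	if (p == NULL)
		return;
	printf("closing port %d\n", p->id);
}

int main(void)
{
	struct port p = { .id = 3 };
	struct tty t = { .driver_data = NULL };

	port_close(&t); /* safe no-op before a successful open */
	port_open(&t, &p);
	port_close(&t); /* closing port 3 */
	return 0;
}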
@@ -827,6 +827,8 @@ static int stli_open(struct tty_struct *tty, struct file *filp)
 		return -ENODEV;
 	if (portp->devnr < 1)
 		return -ENODEV;
+
+	tty->driver_data = portp;
 	return tty_port_open(&portp->port, tty, filp);
 }
@@ -1011,6 +1011,7 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
 	if (!info->ioaddr)
 		return -ENODEV;
 
+	tty->driver_data = info;
 	return tty_port_open(&info->port, tty, filp);
 }
 
@@ -1074,7 +1075,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
 	struct mxser_port *info = tty->driver_data;
 	struct tty_port *port = &info->port;
 
-	if (tty->index == MXSER_PORTS)
+	if (tty->index == MXSER_PORTS || info == NULL)
 		return;
 	if (tty_port_close_start(port, tty, filp) == 0)
 		return;
@@ -909,6 +909,7 @@ static int rc_open(struct tty_struct *tty, struct file *filp)
 	if (error)
 		return error;
 
+	tty->driver_data = port;
 	return tty_port_open(&port->port, tty, filp);
 }
@@ -724,7 +724,6 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
 {
 	struct stlport *portp;
 	struct stlbrd *brdp;
-	struct tty_port *port;
 	unsigned int minordev, brdnr, panelnr;
 	int portnr;
 
@@ -754,7 +753,8 @@ static int stl_open(struct tty_struct *tty, struct file *filp)
 	portp = brdp->panels[panelnr]->ports[portnr];
 	if (portp == NULL)
 		return -ENODEV;
-	port = &portp->port;
+
+	tty->driver_data = portp;
 	return tty_port_open(&portp->port, tty, filp);
 
 }
 
@@ -841,7 +841,8 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
 	pr_debug("stl_close(tty=%p,filp=%p)\n", tty, filp);
 
 	portp = tty->driver_data;
-	BUG_ON(portp == NULL);
+	if (portp == NULL)
+		return;
 	tty_port_close(&portp->port, tty, filp);
 }
@@ -1359,3 +1359,5 @@ module_exit(txx9dmac_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("TXx9 DMA Controller driver");
 MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
+MODULE_ALIAS("platform:txx9dmac");
+MODULE_ALIAS("platform:txx9dmac-chan");
@@ -294,7 +294,6 @@ static void amd_decode_ls_mce(u64 mc3_status)
 void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 {
 	u32 ec = ERROR_CODE(regs->nbsl);
-	u32 xec = EXT_ERROR_CODE(regs->nbsl);
 
 	if (!handle_errors)
 		return;
 
@@ -324,7 +323,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
 		pr_cont("\n");
 	}
 
-	pr_emerg("%s.\n", EXT_ERR_MSG(xec));
+	pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl));
 
 	if (BUS_ERROR(ec) && nb_bus_decoder)
 		nb_bus_decoder(node_id, regs);
 
@@ -374,7 +373,7 @@ static int amd_decode_mce(struct notifier_block *nb, unsigned long val,
 	       ((m->status & MCI_STATUS_PCC) ? "yes" : "no"));
 
 	/* do the two bits[14:13] together */
-	ecc = m->status & (3ULL << 45);
+	ecc = (m->status >> 45) & 0x3;
 	if (ecc)
 		pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U"));
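The ecc fix above is a classic masked-compare bug: masking in place leaves the field shifted up by 45 bits, so the later (ecc == 2) test could never be true; shifting down first yields a small value that can be compared against 1 and 2. The arithmetic, runnable standalone:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A status word with bits [46:45] = 0b10 (correctable ECC). */
	uint64_t status = 2ULL << 45;

	uint64_t old_ecc = status & (3ULL << 45); /* 0x400000000000 */
	uint64_t new_ecc = (status >> 45) & 0x3;  /* 2 */

	printf("old: %#llx -> (ecc == 2) is %s\n",
	       (unsigned long long)old_ecc, old_ecc == 2 ? "true" : "false");
	printf("new: %#llx -> (ecc == 2) is %s\n",
	       (unsigned long long)new_ecc, new_ecc == 2 ? "true" : "false");
	return 0;
}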
@@ -416,7 +416,8 @@ static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
 	return 0;
 
 free_sd:
-	sysfs_put(pdesc->value_sd);
+	if (pdesc)
+		sysfs_put(pdesc->value_sd);
 free_id:
 	idr_remove(&pdesc_idr, id);
 	desc->flags &= GPIO_FLAGS_MASK;
@@ -77,7 +77,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
 		    (offset + size))
 			break;
-	if (!agpmem)
+	if (&agpmem->head == &dev->agp->memory)
 		return NULL;
 
 	/*
@@ -354,7 +354,10 @@ static struct bin_attribute edid_attr = {
 int drm_sysfs_connector_add(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	int ret = 0, i, j;
+	int attr_cnt = 0;
+	int opt_cnt = 0;
+	int i;
+	int ret = 0;
 
 	/* We shouldn't get called more than once for the same connector */
 	BUG_ON(device_is_registered(&connector->kdev));
 
@@ -377,8 +380,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 
 	/* Standard attributes */
 
-	for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
-		ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+	for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
+		ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
 		if (ret)
 			goto err_out_files;
 	}
 
@@ -394,8 +397,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Component:
 	case DRM_MODE_CONNECTOR_TV:
-		for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
-			ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+		for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
+			ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
 			if (ret)
 				goto err_out_files;
 		}
 
@@ -414,10 +417,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 	return 0;
 
 err_out_files:
-	if (i > 0)
-		for (j = 0; j < i; j++)
-			device_remove_file(&connector->kdev,
-					   &connector_attrs[i]);
+	for (i = 0; i < opt_cnt; i++)
+		device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+	for (i = 0; i < attr_cnt; i++)
+		device_remove_file(&connector->kdev, &connector_attrs[i]);
 	device_unregister(&connector->kdev);
 
 out:
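The drm_sysfs hunks fix a doubly broken unwind path: the old loop indexed connector_attrs[i] with the failing index instead of j, and never removed the optional attributes at all. The repaired shape, create N things and on failure remove exactly the ones created, is generic enough to show standalone; names and counts below are invented for the demo.

#include <stdio.h>

#define N_STD 3
#define N_OPT 2

static int create(const char *what, int i, int fail_at)
{
	if (i == fail_at)
		return -1; /* simulate device_create_file() failing */
	printf("created %s[%d]\n", what, i);
	return 0;
}

int main(void)
{
	int attr_cnt = 0, opt_cnt = 0, i;

	for (attr_cnt = 0; attr_cnt < N_STD; attr_cnt++)
		if (create("attr", attr_cnt, -1))
			goto err;
	for (opt_cnt = 0; opt_cnt < N_OPT; opt_cnt++)
		if (create("opt", opt_cnt, 1))
			goto err;
	return 0;

err:
	/* Unwind exactly what was created, one counter per group,
	 * as the fixed err_out_files path now does. */
	for (i = 0; i < opt_cnt; i++)
		printf("removed opt[%d]\n", i);
	for (i = 0; i < attr_cnt; i++)
		printf("removed attr[%d]\n", i);
	return 1;
}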
@@ -2912,7 +2912,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2
   UCHAR                ucTV_BootUpDefaultStandard;
   UCHAR                ucExt_TV_ASIC_ID;
   UCHAR                ucExt_TV_ASIC_SlaveAddr;
-  ATOM_DTD_FORMAT      aModeTimings[MAX_SUPPORTED_TV_TIMING];
+  ATOM_DTD_FORMAT      aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
 }ATOM_ANALOG_TV_INFO_V1_2;
 
 typedef struct _ATOM_DPCD_INFO
@@ -2975,7 +2975,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 
 	for (i = 0; i < track->num_cb; i++) {
 		if (track->cb[i].robj == NULL) {
-			if (!(track->fastfill || track->color_channel_mask ||
+			if (!(track->zb_cb_clear || track->color_channel_mask ||
 			      track->blend_read_enable)) {
 				continue;
 			}
@@ -75,7 +75,7 @@ struct r100_cs_track {
 	struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
 	bool z_enabled;
 	bool separate_cube;
-	bool fastfill;
+	bool zb_cb_clear;
 	bool blend_read_enable;
 };
@@ -1044,7 +1044,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		break;
 	case 0x4d1c:
 		/* ZB_BW_CNTL */
-		track->fastfill = !!(idx_value & (1 << 2));
+		track->zb_cb_clear = !!(idx_value & (1 << 5));
 		break;
 	case 0x4e04:
 		/* RB3D_BLENDCNTL */