Merge 3.12-rc3 into usb-next
We want the USB fixes in here as well. Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
commit
df9b17f586
230 changed files with 2350 additions and 1250 deletions
3
CREDITS
3
CREDITS
|
@ -2808,8 +2808,7 @@ S: Ottawa, Ontario
|
|||
S: Canada K2P 0X8
|
||||
|
||||
N: Mikael Pettersson
|
||||
E: mikpe@it.uu.se
|
||||
W: http://user.it.uu.se/~mikpe/linux/
|
||||
E: mikpelinux@gmail.com
|
||||
D: Miscellaneous fixes
|
||||
|
||||
N: Reed H. Petty
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
* Samsung Exynos specific extensions to the Synopsis Designware Mobile
|
||||
* Samsung Exynos specific extensions to the Synopsys Designware Mobile
|
||||
Storage Host Controller
|
||||
|
||||
The Synopsis designware mobile storage host controller is used to interface
|
||||
The Synopsys designware mobile storage host controller is used to interface
|
||||
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
|
||||
differences between the core Synopsis dw mshc controller properties described
|
||||
by synopsis-dw-mshc.txt and the properties used by the Samsung Exynos specific
|
||||
extensions to the Synopsis Designware Mobile Storage Host Controller.
|
||||
differences between the core Synopsys dw mshc controller properties described
|
||||
by synopsys-dw-mshc.txt and the properties used by the Samsung Exynos specific
|
||||
extensions to the Synopsys Designware Mobile Storage Host Controller.
|
||||
|
||||
Required Properties:
|
||||
|
||||
|
|
|
@ -1,11 +1,11 @@
|
|||
* Rockchip specific extensions to the Synopsis Designware Mobile
|
||||
* Rockchip specific extensions to the Synopsys Designware Mobile
|
||||
Storage Host Controller
|
||||
|
||||
The Synopsis designware mobile storage host controller is used to interface
|
||||
The Synopsys designware mobile storage host controller is used to interface
|
||||
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
|
||||
differences between the core Synopsis dw mshc controller properties described
|
||||
by synopsis-dw-mshc.txt and the properties used by the Rockchip specific
|
||||
extensions to the Synopsis Designware Mobile Storage Host Controller.
|
||||
differences between the core Synopsys dw mshc controller properties described
|
||||
by synopsys-dw-mshc.txt and the properties used by the Rockchip specific
|
||||
extensions to the Synopsys Designware Mobile Storage Host Controller.
|
||||
|
||||
Required Properties:
|
||||
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
* Synopsis Designware Mobile Storage Host Controller
|
||||
* Synopsys Designware Mobile Storage Host Controller
|
||||
|
||||
The Synopsis designware mobile storage host controller is used to interface
|
||||
The Synopsys designware mobile storage host controller is used to interface
|
||||
a SoC with storage medium such as eMMC or SD/MMC cards. This file documents
|
||||
differences between the core mmc properties described by mmc.txt and the
|
||||
properties used by the Synopsis Designware Mobile Storage Host Controller.
|
||||
properties used by the Synopsys Designware Mobile Storage Host Controller.
|
||||
|
||||
Required Properties:
|
||||
|
||||
* compatible: should be
|
||||
- snps,dw-mshc: for controllers compliant with synopsis dw-mshc.
|
||||
- snps,dw-mshc: for controllers compliant with synopsys dw-mshc.
|
||||
* #address-cells: should be 1.
|
||||
* #size-cells: should be 0.
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
* Synopsis Designware PCIe interface
|
||||
* Synopsys Designware PCIe interface
|
||||
|
||||
Required properties:
|
||||
- compatible: should contain "snps,dw-pcie" to identify the
|
||||
|
|
|
@ -3485,6 +3485,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|||
the unplug protocol
|
||||
never -- do not unplug even if version check succeeds
|
||||
|
||||
xen_nopvspin [X86,XEN]
|
||||
Disables the ticketlock slowpath using Xen PV
|
||||
optimizations.
|
||||
|
||||
xirc2ps_cs= [NET,PCMCIA]
|
||||
Format:
|
||||
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
|
||||
|
|
|
@ -296,6 +296,12 @@ Cirrus Logic CS4206/4207
|
|||
imac27 IMac 27 Inch
|
||||
auto BIOS setup (default)
|
||||
|
||||
Cirrus Logic CS4208
|
||||
===================
|
||||
mba6 MacBook Air 6,1 and 6,2
|
||||
gpio0 Enable GPIO 0 amp
|
||||
auto BIOS setup (default)
|
||||
|
||||
VIA VT17xx/VT18xx/VT20xx
|
||||
========================
|
||||
auto BIOS setup (default)
|
||||
|
|
20
MAINTAINERS
20
MAINTAINERS
|
@ -1812,7 +1812,8 @@ S: Supported
|
|||
F: drivers/net/ethernet/broadcom/bnx2x/
|
||||
|
||||
BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
|
||||
M: Christian Daudt <csd@broadcom.com>
|
||||
M: Christian Daudt <bcm@fixthebug.org>
|
||||
L: bcm-kernel-feedback-list@broadcom.com
|
||||
T: git git://git.github.com/broadcom/bcm11351
|
||||
S: Maintained
|
||||
F: arch/arm/mach-bcm/
|
||||
|
@ -2639,6 +2640,18 @@ F: include/linux/device-mapper.h
|
|||
F: include/linux/dm-*.h
|
||||
F: include/uapi/linux/dm-*.h
|
||||
|
||||
DIGI NEO AND CLASSIC PCI PRODUCTS
|
||||
M: Lidza Louina <lidza.louina@gmail.com>
|
||||
L: driverdev-devel@linuxdriverproject.org
|
||||
S: Maintained
|
||||
F: drivers/staging/dgnc/
|
||||
|
||||
DIGI EPCA PCI PRODUCTS
|
||||
M: Lidza Louina <lidza.louina@gmail.com>
|
||||
L: driverdev-devel@linuxdriverproject.org
|
||||
S: Maintained
|
||||
F: drivers/staging/dgap/
|
||||
|
||||
DIOLAN U2C-12 I2C DRIVER
|
||||
M: Guenter Roeck <linux@roeck-us.net>
|
||||
L: linux-i2c@vger.kernel.org
|
||||
|
@ -6603,7 +6616,7 @@ S: Obsolete
|
|||
F: drivers/net/wireless/prism54/
|
||||
|
||||
PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
|
||||
M: Mikael Pettersson <mikpe@it.uu.se>
|
||||
M: Mikael Pettersson <mikpelinux@gmail.com>
|
||||
L: linux-ide@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/ata/sata_promise.*
|
||||
|
@ -8732,9 +8745,8 @@ F: Documentation/hid/hiddev.txt
|
|||
F: drivers/hid/usbhid/
|
||||
|
||||
USB/IP DRIVERS
|
||||
M: Matt Mooney <mfm@muteddisk.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
S: Maintained
|
||||
S: Orphan
|
||||
F: drivers/staging/usbip/
|
||||
|
||||
USB ISP116X DRIVER
|
||||
|
|
2
Makefile
2
Makefile
|
@ -1,7 +1,7 @@
|
|||
VERSION = 3
|
||||
PATCHLEVEL = 12
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc1
|
||||
EXTRAVERSION = -rc3
|
||||
NAME = One Giant Leap for Frogkind
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
@ -286,9 +286,6 @@ config HAVE_PERF_USER_STACK_DUMP
|
|||
config HAVE_ARCH_JUMP_LABEL
|
||||
bool
|
||||
|
||||
config HAVE_ARCH_MUTEX_CPU_RELAX
|
||||
bool
|
||||
|
||||
config HAVE_RCU_TABLE_FREE
|
||||
bool
|
||||
|
||||
|
|
|
@ -2217,8 +2217,7 @@ config NEON
|
|||
|
||||
config KERNEL_MODE_NEON
|
||||
bool "Support for NEON in kernel mode"
|
||||
default n
|
||||
depends on NEON
|
||||
depends on NEON && AEABI
|
||||
help
|
||||
Say Y to include support for NEON in kernel mode.
|
||||
|
||||
|
|
|
@ -148,7 +148,7 @@ AES_Te:
|
|||
@ const AES_KEY *key) {
|
||||
.align 5
|
||||
ENTRY(AES_encrypt)
|
||||
sub r3,pc,#8 @ AES_encrypt
|
||||
adr r3,AES_encrypt
|
||||
stmdb sp!,{r1,r4-r12,lr}
|
||||
mov r12,r0 @ inp
|
||||
mov r11,r2
|
||||
|
@ -381,7 +381,7 @@ _armv4_AES_encrypt:
|
|||
.align 5
|
||||
ENTRY(private_AES_set_encrypt_key)
|
||||
_armv4_AES_set_encrypt_key:
|
||||
sub r3,pc,#8 @ AES_set_encrypt_key
|
||||
adr r3,_armv4_AES_set_encrypt_key
|
||||
teq r0,#0
|
||||
moveq r0,#-1
|
||||
beq .Labrt
|
||||
|
@ -843,7 +843,7 @@ AES_Td:
|
|||
@ const AES_KEY *key) {
|
||||
.align 5
|
||||
ENTRY(AES_decrypt)
|
||||
sub r3,pc,#8 @ AES_decrypt
|
||||
adr r3,AES_decrypt
|
||||
stmdb sp!,{r1,r4-r12,lr}
|
||||
mov r12,r0 @ inp
|
||||
mov r11,r2
|
||||
|
|
|
@ -19,6 +19,13 @@
|
|||
#include <asm/unified.h>
|
||||
#include <asm/compiler.h>
|
||||
|
||||
#if __LINUX_ARM_ARCH__ < 6
|
||||
#include <asm-generic/uaccess-unaligned.h>
|
||||
#else
|
||||
#define __get_user_unaligned __get_user
|
||||
#define __put_user_unaligned __put_user
|
||||
#endif
|
||||
|
||||
#define VERIFY_READ 0
|
||||
#define VERIFY_WRITE 1
|
||||
|
||||
|
|
|
@ -442,10 +442,10 @@ local_restart:
|
|||
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
|
||||
|
||||
add r1, sp, #S_OFF
|
||||
cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
|
||||
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
|
||||
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
|
||||
bcs arm_syscall
|
||||
2: mov why, #0 @ no longer a real syscall
|
||||
mov why, #0 @ no longer a real syscall
|
||||
b sys_ni_syscall @ not private func
|
||||
|
||||
#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
|
||||
|
|
|
@ -329,10 +329,10 @@
|
|||
#ifdef CONFIG_CONTEXT_TRACKING
|
||||
.if \save
|
||||
stmdb sp!, {r0-r3, ip, lr}
|
||||
bl user_exit
|
||||
bl context_tracking_user_exit
|
||||
ldmia sp!, {r0-r3, ip, lr}
|
||||
.else
|
||||
bl user_exit
|
||||
bl context_tracking_user_exit
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
@ -341,10 +341,10 @@
|
|||
#ifdef CONFIG_CONTEXT_TRACKING
|
||||
.if \save
|
||||
stmdb sp!, {r0-r3, ip, lr}
|
||||
bl user_enter
|
||||
bl context_tracking_user_enter
|
||||
ldmia sp!, {r0-r3, ip, lr}
|
||||
.else
|
||||
bl user_enter
|
||||
bl context_tracking_user_enter
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
|
|
@ -187,7 +187,7 @@
|
|||
|
||||
/*
|
||||
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
|
||||
* pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
|
||||
* pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
|
||||
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
|
||||
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
|
||||
*/
|
||||
|
|
|
@ -308,12 +308,10 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
|
|||
{
|
||||
int i;
|
||||
|
||||
/* Make sure that gcc doesn't leave the empty loop body. */
|
||||
for (i = 0; i < nelems; i++, sg++) {
|
||||
if (cpu_needs_post_dma_flush(dev))
|
||||
if (cpu_needs_post_dma_flush(dev))
|
||||
for (i = 0; i < nelems; i++, sg++)
|
||||
__dma_sync(sg_page(sg), sg->offset, sg->length,
|
||||
direction);
|
||||
}
|
||||
}
|
||||
|
||||
static void mips_dma_sync_sg_for_device(struct device *dev,
|
||||
|
@ -321,12 +319,10 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
|
|||
{
|
||||
int i;
|
||||
|
||||
/* Make sure that gcc doesn't leave the empty loop body. */
|
||||
for (i = 0; i < nelems; i++, sg++) {
|
||||
if (!plat_device_is_coherent(dev))
|
||||
if (!plat_device_is_coherent(dev))
|
||||
for (i = 0; i < nelems; i++, sg++)
|
||||
__dma_sync(sg_page(sg), sg->offset, sg->length,
|
||||
direction);
|
||||
}
|
||||
}
|
||||
|
||||
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
|
|
|
@ -14,53 +14,9 @@
|
|||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/of.h> /* linux/of.h gets to determine #include ordering */
|
||||
|
||||
#ifndef _ASM_OPENRISC_PROM_H
|
||||
#define _ASM_OPENRISC_PROM_H
|
||||
#ifdef __KERNEL__
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/irq.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/platform_device.h>
|
||||
#define HAVE_ARCH_DEVTREE_FIXUPS
|
||||
|
||||
/* Other Prototypes */
|
||||
extern int early_uartlite_console(void);
|
||||
|
||||
/* Parse the ibm,dma-window property of an OF node into the busno, phys and
|
||||
* size parameters.
|
||||
*/
|
||||
void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
|
||||
unsigned long *busno, unsigned long *phys, unsigned long *size);
|
||||
|
||||
extern void kdump_move_device_tree(void);
|
||||
|
||||
/* Get the MAC address */
|
||||
extern const void *of_get_mac_address(struct device_node *np);
|
||||
|
||||
/**
|
||||
* of_irq_map_pci - Resolve the interrupt for a PCI device
|
||||
* @pdev: the device whose interrupt is to be resolved
|
||||
* @out_irq: structure of_irq filled by this function
|
||||
*
|
||||
* This function resolves the PCI interrupt for a given PCI device. If a
|
||||
* device-node exists for a given pci_dev, it will use normal OF tree
|
||||
* walking. If not, it will implement standard swizzling and walk up the
|
||||
* PCI tree until an device-node is found, at which point it will finish
|
||||
* resolving using the OF tree walking.
|
||||
*/
|
||||
struct pci_dev;
|
||||
extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _ASM_OPENRISC_PROM_H */
|
||||
|
|
|
@ -74,7 +74,7 @@ src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
|
|||
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
|
||||
src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
|
||||
|
||||
src-plat-y := of.c
|
||||
src-plat-y := of.c epapr.c
|
||||
src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
|
||||
treeboot-walnut.c cuboot-acadia.c \
|
||||
cuboot-kilauea.c simpleboot.c \
|
||||
|
@ -97,7 +97,7 @@ src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
|
|||
prpmc2800.c
|
||||
src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
|
||||
src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
|
||||
src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
|
||||
src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
|
||||
|
||||
src-wlib := $(sort $(src-wlib-y))
|
||||
src-plat := $(sort $(src-plat-y))
|
||||
|
|
9
arch/powerpc/boot/epapr-wrapper.c
Normal file
9
arch/powerpc/boot/epapr-wrapper.c
Normal file
|
@ -0,0 +1,9 @@
|
|||
extern void epapr_platform_init(unsigned long r3, unsigned long r4,
|
||||
unsigned long r5, unsigned long r6,
|
||||
unsigned long r7);
|
||||
|
||||
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7)
|
||||
{
|
||||
epapr_platform_init(r3, r4, r5, r6, r7);
|
||||
}
|
|
@ -48,8 +48,8 @@ static void platform_fixups(void)
|
|||
fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size);
|
||||
}
|
||||
|
||||
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7)
|
||||
void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7)
|
||||
{
|
||||
epapr_magic = r6;
|
||||
ima_size = r7;
|
||||
|
|
|
@ -26,6 +26,9 @@
|
|||
|
||||
static unsigned long claim_base;
|
||||
|
||||
void epapr_platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7);
|
||||
|
||||
static void *of_try_claim(unsigned long size)
|
||||
{
|
||||
unsigned long addr = 0;
|
||||
|
@ -61,7 +64,7 @@ static void of_image_hdr(const void *hdr)
|
|||
}
|
||||
}
|
||||
|
||||
void platform_init(unsigned long a1, unsigned long a2, void *promptr)
|
||||
static void of_platform_init(unsigned long a1, unsigned long a2, void *promptr)
|
||||
{
|
||||
platform_ops.image_hdr = of_image_hdr;
|
||||
platform_ops.malloc = of_try_claim;
|
||||
|
@ -81,3 +84,14 @@ void platform_init(unsigned long a1, unsigned long a2, void *promptr)
|
|||
loader_info.initrd_size = a2;
|
||||
}
|
||||
}
|
||||
|
||||
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
|
||||
unsigned long r6, unsigned long r7)
|
||||
{
|
||||
/* Detect OF vs. ePAPR boot */
|
||||
if (r5)
|
||||
of_platform_init(r3, r4, (void *)r5);
|
||||
else
|
||||
epapr_platform_init(r3, r4, r5, r6, r7);
|
||||
}
|
||||
|
||||
|
|
|
@ -148,18 +148,18 @@ make_space=y
|
|||
|
||||
case "$platform" in
|
||||
pseries)
|
||||
platformo=$object/of.o
|
||||
platformo="$object/of.o $object/epapr.o"
|
||||
link_address='0x4000000'
|
||||
;;
|
||||
maple)
|
||||
platformo=$object/of.o
|
||||
platformo="$object/of.o $object/epapr.o"
|
||||
link_address='0x400000'
|
||||
;;
|
||||
pmac|chrp)
|
||||
platformo=$object/of.o
|
||||
platformo="$object/of.o $object/epapr.o"
|
||||
;;
|
||||
coff)
|
||||
platformo="$object/crt0.o $object/of.o"
|
||||
platformo="$object/crt0.o $object/of.o $object/epapr.o"
|
||||
lds=$object/zImage.coff.lds
|
||||
link_address='0x500000'
|
||||
pie=
|
||||
|
@ -253,6 +253,7 @@ treeboot-iss4xx-mpic)
|
|||
platformo="$object/treeboot-iss4xx.o"
|
||||
;;
|
||||
epapr)
|
||||
platformo="$object/epapr.o $object/epapr-wrapper.o"
|
||||
link_address='0x20000000'
|
||||
pie=-pie
|
||||
;;
|
||||
|
|
|
@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
|
|||
|
||||
extern void irq_ctx_init(void);
|
||||
extern void call_do_softirq(struct thread_info *tp);
|
||||
extern int call_handle_irq(int irq, void *p1,
|
||||
struct thread_info *tp, void *func);
|
||||
extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
|
||||
extern void do_IRQ(struct pt_regs *regs);
|
||||
extern void __do_irq(struct pt_regs *regs);
|
||||
|
||||
int irq_choose_cpu(const struct cpumask *mask);
|
||||
|
||||
|
|
|
@ -149,8 +149,6 @@ typedef struct {
|
|||
|
||||
struct thread_struct {
|
||||
unsigned long ksp; /* Kernel stack pointer */
|
||||
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
unsigned long ksp_vsid;
|
||||
#endif
|
||||
|
@ -162,6 +160,7 @@ struct thread_struct {
|
|||
#endif
|
||||
#ifdef CONFIG_PPC32
|
||||
void *pgdir; /* root of page-table tree */
|
||||
unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
|
||||
#endif
|
||||
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
|
||||
/*
|
||||
|
@ -321,7 +320,6 @@ struct thread_struct {
|
|||
#else
|
||||
#define INIT_THREAD { \
|
||||
.ksp = INIT_SP, \
|
||||
.ksp_limit = INIT_SP_LIMIT, \
|
||||
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
|
||||
.fs = KERNEL_DS, \
|
||||
.fpr = {{0}}, \
|
||||
|
|
|
@ -80,10 +80,11 @@ int main(void)
|
|||
DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
|
||||
#else
|
||||
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
|
||||
DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
|
||||
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
DEFINE(KSP, offsetof(struct thread_struct, ksp));
|
||||
DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
|
||||
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
|
||||
#ifdef CONFIG_BOOKE
|
||||
DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
|
||||
|
|
|
@ -441,50 +441,6 @@ void migrate_irqs(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline void handle_one_irq(unsigned int irq)
|
||||
{
|
||||
struct thread_info *curtp, *irqtp;
|
||||
unsigned long saved_sp_limit;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
if (!desc)
|
||||
return;
|
||||
|
||||
/* Switch to the irq stack to handle this */
|
||||
curtp = current_thread_info();
|
||||
irqtp = hardirq_ctx[smp_processor_id()];
|
||||
|
||||
if (curtp == irqtp) {
|
||||
/* We're already on the irq stack, just handle it */
|
||||
desc->handle_irq(irq, desc);
|
||||
return;
|
||||
}
|
||||
|
||||
saved_sp_limit = current->thread.ksp_limit;
|
||||
|
||||
irqtp->task = curtp->task;
|
||||
irqtp->flags = 0;
|
||||
|
||||
/* Copy the softirq bits in preempt_count so that the
|
||||
* softirq checks work in the hardirq context. */
|
||||
irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
|
||||
(curtp->preempt_count & SOFTIRQ_MASK);
|
||||
|
||||
current->thread.ksp_limit = (unsigned long)irqtp +
|
||||
_ALIGN_UP(sizeof(struct thread_info), 16);
|
||||
|
||||
call_handle_irq(irq, desc, irqtp, desc->handle_irq);
|
||||
current->thread.ksp_limit = saved_sp_limit;
|
||||
irqtp->task = NULL;
|
||||
|
||||
/* Set any flag that may have been set on the
|
||||
* alternate stack
|
||||
*/
|
||||
if (irqtp->flags)
|
||||
set_bits(irqtp->flags, &curtp->flags);
|
||||
}
|
||||
|
||||
static inline void check_stack_overflow(void)
|
||||
{
|
||||
#ifdef CONFIG_DEBUG_STACKOVERFLOW
|
||||
|
@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
|
|||
#endif
|
||||
}
|
||||
|
||||
void do_IRQ(struct pt_regs *regs)
|
||||
void __do_irq(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
struct irq_desc *desc;
|
||||
unsigned int irq;
|
||||
|
||||
irq_enter();
|
||||
|
@ -519,18 +475,56 @@ void do_IRQ(struct pt_regs *regs)
|
|||
*/
|
||||
irq = ppc_md.get_irq();
|
||||
|
||||
/* We can hard enable interrupts now */
|
||||
/* We can hard enable interrupts now to allow perf interrupts */
|
||||
may_hard_irq_enable();
|
||||
|
||||
/* And finally process it */
|
||||
if (irq != NO_IRQ)
|
||||
handle_one_irq(irq);
|
||||
else
|
||||
if (unlikely(irq == NO_IRQ))
|
||||
__get_cpu_var(irq_stat).spurious_irqs++;
|
||||
else {
|
||||
desc = irq_to_desc(irq);
|
||||
if (likely(desc))
|
||||
desc->handle_irq(irq, desc);
|
||||
}
|
||||
|
||||
trace_irq_exit(regs);
|
||||
|
||||
irq_exit();
|
||||
}
|
||||
|
||||
void do_IRQ(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
struct thread_info *curtp, *irqtp;
|
||||
|
||||
/* Switch to the irq stack to handle this */
|
||||
curtp = current_thread_info();
|
||||
irqtp = hardirq_ctx[raw_smp_processor_id()];
|
||||
|
||||
/* Already there ? */
|
||||
if (unlikely(curtp == irqtp)) {
|
||||
__do_irq(regs);
|
||||
set_irq_regs(old_regs);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Prepare the thread_info in the irq stack */
|
||||
irqtp->task = curtp->task;
|
||||
irqtp->flags = 0;
|
||||
|
||||
/* Copy the preempt_count so that the [soft]irq checks work. */
|
||||
irqtp->preempt_count = curtp->preempt_count;
|
||||
|
||||
/* Switch stack and call */
|
||||
call_do_irq(regs, irqtp);
|
||||
|
||||
/* Restore stack limit */
|
||||
irqtp->task = NULL;
|
||||
|
||||
/* Copy back updates to the thread_info */
|
||||
if (irqtp->flags)
|
||||
set_bits(irqtp->flags, &curtp->flags);
|
||||
|
||||
set_irq_regs(old_regs);
|
||||
}
|
||||
|
||||
|
@ -592,28 +586,22 @@ void irq_ctx_init(void)
|
|||
memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
|
||||
tp = softirq_ctx[i];
|
||||
tp->cpu = i;
|
||||
tp->preempt_count = 0;
|
||||
|
||||
memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
|
||||
tp = hardirq_ctx[i];
|
||||
tp->cpu = i;
|
||||
tp->preempt_count = HARDIRQ_OFFSET;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void do_softirq_onstack(void)
|
||||
{
|
||||
struct thread_info *curtp, *irqtp;
|
||||
unsigned long saved_sp_limit = current->thread.ksp_limit;
|
||||
|
||||
curtp = current_thread_info();
|
||||
irqtp = softirq_ctx[smp_processor_id()];
|
||||
irqtp->task = curtp->task;
|
||||
irqtp->flags = 0;
|
||||
current->thread.ksp_limit = (unsigned long)irqtp +
|
||||
_ALIGN_UP(sizeof(struct thread_info), 16);
|
||||
call_do_softirq(irqtp);
|
||||
current->thread.ksp_limit = saved_sp_limit;
|
||||
irqtp->task = NULL;
|
||||
|
||||
/* Set any flag that may have been set on the
|
||||
|
|
|
@ -36,26 +36,41 @@
|
|||
|
||||
.text
|
||||
|
||||
/*
|
||||
* We store the saved ksp_limit in the unused part
|
||||
* of the STACK_FRAME_OVERHEAD
|
||||
*/
|
||||
_GLOBAL(call_do_softirq)
|
||||
mflr r0
|
||||
stw r0,4(r1)
|
||||
lwz r10,THREAD+KSP_LIMIT(r2)
|
||||
addi r11,r3,THREAD_INFO_GAP
|
||||
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
|
||||
mr r1,r3
|
||||
stw r10,8(r1)
|
||||
stw r11,THREAD+KSP_LIMIT(r2)
|
||||
bl __do_softirq
|
||||
lwz r10,8(r1)
|
||||
lwz r1,0(r1)
|
||||
lwz r0,4(r1)
|
||||
stw r10,THREAD+KSP_LIMIT(r2)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
_GLOBAL(call_handle_irq)
|
||||
_GLOBAL(call_do_irq)
|
||||
mflr r0
|
||||
stw r0,4(r1)
|
||||
mtctr r6
|
||||
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
|
||||
mr r1,r5
|
||||
bctrl
|
||||
lwz r10,THREAD+KSP_LIMIT(r2)
|
||||
addi r11,r3,THREAD_INFO_GAP
|
||||
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
|
||||
mr r1,r4
|
||||
stw r10,8(r1)
|
||||
stw r11,THREAD+KSP_LIMIT(r2)
|
||||
bl __do_irq
|
||||
lwz r10,8(r1)
|
||||
lwz r1,0(r1)
|
||||
lwz r0,4(r1)
|
||||
stw r10,THREAD+KSP_LIMIT(r2)
|
||||
mtlr r0
|
||||
blr
|
||||
|
||||
|
|
|
@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
|
|||
mtlr r0
|
||||
blr
|
||||
|
||||
_GLOBAL(call_handle_irq)
|
||||
ld r8,0(r6)
|
||||
_GLOBAL(call_do_irq)
|
||||
mflr r0
|
||||
std r0,16(r1)
|
||||
mtctr r8
|
||||
stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
|
||||
mr r1,r5
|
||||
bctrl
|
||||
stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
|
||||
mr r1,r4
|
||||
bl .__do_irq
|
||||
ld r1,0(r1)
|
||||
ld r0,16(r1)
|
||||
mtlr r0
|
||||
|
|
|
@ -1000,9 +1000,10 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
kregs = (struct pt_regs *) sp;
|
||||
sp -= STACK_FRAME_OVERHEAD;
|
||||
p->thread.ksp = sp;
|
||||
#ifdef CONFIG_PPC32
|
||||
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
|
||||
_ALIGN_UP(sizeof(struct thread_info), 16);
|
||||
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_HW_BREAKPOINT
|
||||
p->thread.ptrace_bps[0] = NULL;
|
||||
#endif
|
||||
|
|
|
@ -196,6 +196,8 @@ static int __initdata mem_reserve_cnt;
|
|||
|
||||
static cell_t __initdata regbuf[1024];
|
||||
|
||||
static bool rtas_has_query_cpu_stopped;
|
||||
|
||||
|
||||
/*
|
||||
* Error results ... some OF calls will return "-1" on error, some
|
||||
|
@ -1574,6 +1576,11 @@ static void __init prom_instantiate_rtas(void)
|
|||
prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
|
||||
&val, sizeof(val));
|
||||
|
||||
/* Check if it supports "query-cpu-stopped-state" */
|
||||
if (prom_getprop(rtas_node, "query-cpu-stopped-state",
|
||||
&val, sizeof(val)) != PROM_ERROR)
|
||||
rtas_has_query_cpu_stopped = true;
|
||||
|
||||
#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
|
||||
/* PowerVN takeover hack */
|
||||
prom_rtas_data = base;
|
||||
|
@ -1815,6 +1822,18 @@ static void __init prom_hold_cpus(void)
|
|||
= (void *) LOW_ADDR(__secondary_hold_acknowledge);
|
||||
unsigned long secondary_hold = LOW_ADDR(__secondary_hold);
|
||||
|
||||
/*
|
||||
* On pseries, if RTAS supports "query-cpu-stopped-state",
|
||||
* we skip this stage, the CPUs will be started by the
|
||||
* kernel using RTAS.
|
||||
*/
|
||||
if ((of_platform == PLATFORM_PSERIES ||
|
||||
of_platform == PLATFORM_PSERIES_LPAR) &&
|
||||
rtas_has_query_cpu_stopped) {
|
||||
prom_printf("prom_hold_cpus: skipped\n");
|
||||
return;
|
||||
}
|
||||
|
||||
prom_debug("prom_hold_cpus: start...\n");
|
||||
prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop);
|
||||
prom_debug(" 1) *spinloop = 0x%x\n", *spinloop);
|
||||
|
@ -3011,6 +3030,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
|
|||
* On non-powermacs, put all CPUs in spin-loops.
|
||||
*
|
||||
* PowerMacs use a different mechanism to spin CPUs
|
||||
*
|
||||
* (This must be done after instanciating RTAS)
|
||||
*/
|
||||
if (of_platform != PLATFORM_POWERMAC &&
|
||||
of_platform != PLATFORM_OPAL)
|
||||
|
|
|
@ -1505,6 +1505,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
|
|||
*/
|
||||
if ((ra == 1) && !(regs->msr & MSR_PR) \
|
||||
&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
|
||||
#ifdef CONFIG_PPC32
|
||||
/*
|
||||
* Check if we will touch kernel sack overflow
|
||||
*/
|
||||
|
@ -1513,7 +1514,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
|
|||
err = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PPC32 */
|
||||
/*
|
||||
* Check if we already set since that means we'll
|
||||
* lose the previous value.
|
||||
|
|
|
@ -233,17 +233,23 @@ static void __init smp_init_pseries(void)
|
|||
|
||||
alloc_bootmem_cpumask_var(&of_spin_mask);
|
||||
|
||||
/* Mark threads which are still spinning in hold loops. */
|
||||
if (cpu_has_feature(CPU_FTR_SMT)) {
|
||||
for_each_present_cpu(i) {
|
||||
if (cpu_thread_in_core(i) == 0)
|
||||
cpumask_set_cpu(i, of_spin_mask);
|
||||
}
|
||||
} else {
|
||||
cpumask_copy(of_spin_mask, cpu_present_mask);
|
||||
}
|
||||
/*
|
||||
* Mark threads which are still spinning in hold loops
|
||||
*
|
||||
* We know prom_init will not have started them if RTAS supports
|
||||
* query-cpu-stopped-state.
|
||||
*/
|
||||
if (rtas_token("query-cpu-stopped-state") == RTAS_UNKNOWN_SERVICE) {
|
||||
if (cpu_has_feature(CPU_FTR_SMT)) {
|
||||
for_each_present_cpu(i) {
|
||||
if (cpu_thread_in_core(i) == 0)
|
||||
cpumask_set_cpu(i, of_spin_mask);
|
||||
}
|
||||
} else
|
||||
cpumask_copy(of_spin_mask, cpu_present_mask);
|
||||
|
||||
cpumask_clear_cpu(boot_cpuid, of_spin_mask);
|
||||
cpumask_clear_cpu(boot_cpuid, of_spin_mask);
|
||||
}
|
||||
|
||||
/* Non-lpar has additional take/give timebase */
|
||||
if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
|
||||
|
|
|
@ -93,6 +93,7 @@ config S390
|
|||
select ARCH_INLINE_WRITE_UNLOCK_IRQ
|
||||
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
|
||||
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
|
||||
select ARCH_USE_CMPXCHG_LOCKREF
|
||||
select ARCH_WANT_IPC_PARSE_VERSION
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
select CLONE_BACKWARDS2
|
||||
|
@ -102,7 +103,6 @@ config S390
|
|||
select GENERIC_TIME_VSYSCALL_OLD
|
||||
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
|
||||
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
|
||||
select HAVE_ARCH_MUTEX_CPU_RELAX
|
||||
select HAVE_ARCH_SECCOMP_FILTER
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT
|
||||
|
|
|
@ -7,5 +7,3 @@
|
|||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
||||
|
||||
#define arch_mutex_cpu_relax() barrier()
|
||||
|
|
|
@ -198,6 +198,8 @@ static inline void cpu_relax(void)
|
|||
barrier();
|
||||
}
|
||||
|
||||
#define arch_mutex_cpu_relax() barrier()
|
||||
|
||||
static inline void psw_set_key(unsigned int key)
|
||||
{
|
||||
asm volatile("spka 0(%0)" : : "d" (key));
|
||||
|
|
|
@ -44,6 +44,11 @@ extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
|
|||
extern int arch_spin_trylock_retry(arch_spinlock_t *);
|
||||
extern void arch_spin_relax(arch_spinlock_t *lock);
|
||||
|
||||
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
|
||||
{
|
||||
return lock.owner_cpu == 0;
|
||||
}
|
||||
|
||||
static inline void arch_spin_lock(arch_spinlock_t *lp)
|
||||
{
|
||||
int old;
|
||||
|
|
|
@ -79,30 +79,38 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn)
|
|||
return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
|
||||
}
|
||||
|
||||
static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
||||
static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
|
||||
{
|
||||
unsigned long pfn;
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return mfn;
|
||||
|
||||
if (unlikely(mfn >= machine_to_phys_nr)) {
|
||||
pfn = ~0;
|
||||
goto try_override;
|
||||
}
|
||||
pfn = 0;
|
||||
if (unlikely(mfn >= machine_to_phys_nr))
|
||||
return ~0;
|
||||
|
||||
/*
|
||||
* The array access can fail (e.g., device space beyond end of RAM).
|
||||
* In such cases it doesn't matter what we return (we return garbage),
|
||||
* but we must handle the fault without crashing!
|
||||
*/
|
||||
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
|
||||
try_override:
|
||||
/* ret might be < 0 if there are no entries in the m2p for mfn */
|
||||
if (ret < 0)
|
||||
pfn = ~0;
|
||||
else if (get_phys_to_machine(pfn) != mfn)
|
||||
return ~0;
|
||||
|
||||
return pfn;
|
||||
}
|
||||
|
||||
static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
||||
{
|
||||
unsigned long pfn;
|
||||
|
||||
if (xen_feature(XENFEAT_auto_translated_physmap))
|
||||
return mfn;
|
||||
|
||||
pfn = mfn_to_pfn_no_overrides(mfn);
|
||||
if (get_phys_to_machine(pfn) != mfn) {
|
||||
/*
|
||||
* If this appears to be a foreign mfn (because the pfn
|
||||
* doesn't map back to the mfn), then check the local override
|
||||
|
@ -111,6 +119,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
|
|||
* m2p_find_override_pfn returns ~0 if it doesn't find anything.
|
||||
*/
|
||||
pfn = m2p_find_override_pfn(mfn, ~0);
|
||||
}
|
||||
|
||||
/*
|
||||
* pfn is ~0 if there are no entries in the m2p for mfn or if the
|
||||
|
|
|
@ -1506,7 +1506,7 @@ static int __init init_hw_perf_events(void)
|
|||
err = amd_pmu_init();
|
||||
break;
|
||||
default:
|
||||
return 0;
|
||||
err = -ENOTSUPP;
|
||||
}
|
||||
if (err != 0) {
|
||||
pr_cont("no PMU driver, software events only.\n");
|
||||
|
@ -1883,9 +1883,9 @@ static struct pmu pmu = {
|
|||
|
||||
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
|
||||
{
|
||||
userpg->cap_usr_time = 0;
|
||||
userpg->cap_usr_time_zero = 0;
|
||||
userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
|
||||
userpg->cap_user_time = 0;
|
||||
userpg->cap_user_time_zero = 0;
|
||||
userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
|
||||
userpg->pmc_width = x86_pmu.cntval_bits;
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
|
||||
|
@ -1894,13 +1894,13 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
|
|||
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
|
||||
return;
|
||||
|
||||
userpg->cap_usr_time = 1;
|
||||
userpg->cap_user_time = 1;
|
||||
userpg->time_mult = this_cpu_read(cyc2ns);
|
||||
userpg->time_shift = CYC2NS_SCALE_FACTOR;
|
||||
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
|
||||
|
||||
if (sched_clock_stable && !check_tsc_disabled()) {
|
||||
userpg->cap_usr_time_zero = 1;
|
||||
userpg->cap_user_time_zero = 1;
|
||||
userpg->time_zero = this_cpu_read(cyc2ns_offset);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2325,6 +2325,7 @@ __init int intel_pmu_init(void)
|
|||
break;
|
||||
|
||||
case 55: /* Atom 22nm "Silvermont" */
|
||||
case 77: /* Avoton "Silvermont" */
|
||||
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
|
||||
sizeof(hw_cache_event_ids));
|
||||
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
|
||||
|
|
|
@ -2706,14 +2706,14 @@ static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
|
|||
box->hrtimer.function = uncore_pmu_hrtimer;
|
||||
}
|
||||
|
||||
struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cpu)
|
||||
static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int node)
|
||||
{
|
||||
struct intel_uncore_box *box;
|
||||
int i, size;
|
||||
|
||||
size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
|
||||
|
||||
box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
|
||||
box = kzalloc_node(size, GFP_KERNEL, node);
|
||||
if (!box)
|
||||
return NULL;
|
||||
|
||||
|
@ -3031,7 +3031,7 @@ static int uncore_validate_group(struct intel_uncore_pmu *pmu,
|
|||
struct intel_uncore_box *fake_box;
|
||||
int ret = -EINVAL, n;
|
||||
|
||||
fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
|
||||
fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
|
||||
if (!fake_box)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -3294,7 +3294,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
|
|||
}
|
||||
|
||||
type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
|
||||
box = uncore_alloc_box(type, 0);
|
||||
box = uncore_alloc_box(type, NUMA_NO_NODE);
|
||||
if (!box)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -3499,7 +3499,7 @@ static int uncore_cpu_prepare(int cpu, int phys_id)
|
|||
if (pmu->func_id < 0)
|
||||
pmu->func_id = j;
|
||||
|
||||
box = uncore_alloc_box(type, cpu);
|
||||
box = uncore_alloc_box(type, cpu_to_node(cpu));
|
||||
if (!box)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -216,6 +216,7 @@ int apply_microcode_amd(int cpu)
|
|||
/* need to apply patch? */
|
||||
if (rev >= mc_amd->hdr.patch_id) {
|
||||
c->microcode = rev;
|
||||
uci->cpu_sig.rev = rev;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -352,12 +352,28 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
|
|||
},
|
||||
{ /* Handle problems with rebooting on the Precision M6600. */
|
||||
.callback = set_pci_reboot,
|
||||
.ident = "Dell OptiPlex 990",
|
||||
.ident = "Dell Precision M6600",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
|
||||
},
|
||||
},
|
||||
{ /* Handle problems with rebooting on the Dell PowerEdge C6100. */
|
||||
.callback = set_pci_reboot,
|
||||
.ident = "Dell PowerEdge C6100",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
|
||||
},
|
||||
},
|
||||
{ /* Some C6100 machines were shipped with vendor being 'Dell'. */
|
||||
.callback = set_pci_reboot,
|
||||
.ident = "Dell PowerEdge C6100",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "C6100"),
|
||||
},
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
|
|
|
@ -912,10 +912,13 @@ void __init efi_enter_virtual_mode(void)
|
|||
|
||||
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
|
||||
md = p;
|
||||
if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
|
||||
md->type != EFI_BOOT_SERVICES_CODE &&
|
||||
md->type != EFI_BOOT_SERVICES_DATA)
|
||||
continue;
|
||||
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
|
||||
#ifdef CONFIG_X86_64
|
||||
if (md->type != EFI_BOOT_SERVICES_CODE &&
|
||||
md->type != EFI_BOOT_SERVICES_DATA)
|
||||
#endif
|
||||
continue;
|
||||
}
|
||||
|
||||
size = md->num_pages << EFI_PAGE_SHIFT;
|
||||
end = md->phys_addr + size;
|
||||
|
|
|
@ -879,7 +879,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
|
|||
unsigned long uninitialized_var(address);
|
||||
unsigned level;
|
||||
pte_t *ptep = NULL;
|
||||
int ret = 0;
|
||||
|
||||
pfn = page_to_pfn(page);
|
||||
if (!PageHighMem(page)) {
|
||||
|
@ -926,8 +925,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
|
|||
* frontend pages while they are being shared with the backend,
|
||||
* because mfn_to_pfn (that ends up being called by GUPF) will
|
||||
* return the backend pfn rather than the frontend pfn. */
|
||||
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
|
||||
if (ret == 0 && get_phys_to_machine(pfn) == mfn)
|
||||
pfn = mfn_to_pfn_no_overrides(mfn);
|
||||
if (get_phys_to_machine(pfn) == mfn)
|
||||
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
|
||||
|
||||
return 0;
|
||||
|
@ -942,7 +941,6 @@ int m2p_remove_override(struct page *page,
|
|||
unsigned long uninitialized_var(address);
|
||||
unsigned level;
|
||||
pte_t *ptep = NULL;
|
||||
int ret = 0;
|
||||
|
||||
pfn = page_to_pfn(page);
|
||||
mfn = get_phys_to_machine(pfn);
|
||||
|
@ -1029,8 +1027,8 @@ int m2p_remove_override(struct page *page,
|
|||
* the original pfn causes mfn_to_pfn(mfn) to return the frontend
|
||||
* pfn again. */
|
||||
mfn &= ~FOREIGN_FRAME_BIT;
|
||||
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
|
||||
if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
|
||||
pfn = mfn_to_pfn_no_overrides(mfn);
|
||||
if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
|
||||
m2p_find_override(mfn) == NULL)
|
||||
set_phys_to_machine(pfn, mfn);
|
||||
|
||||
|
|
|
@ -259,6 +259,14 @@ void xen_uninit_lock_cpu(int cpu)
|
|||
}
|
||||
|
||||
|
||||
/*
|
||||
* Our init of PV spinlocks is split in two init functions due to us
|
||||
* using paravirt patching and jump labels patching and having to do
|
||||
* all of this before SMP code is invoked.
|
||||
*
|
||||
* The paravirt patching needs to be done _before_ the alternative asm code
|
||||
* is started, otherwise we would not patch the core kernel code.
|
||||
*/
|
||||
void __init xen_init_spinlocks(void)
|
||||
{
|
||||
|
||||
|
@ -267,12 +275,26 @@ void __init xen_init_spinlocks(void)
|
|||
return;
|
||||
}
|
||||
|
||||
static_key_slow_inc(¶virt_ticketlocks_enabled);
|
||||
|
||||
pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
|
||||
pv_lock_ops.unlock_kick = xen_unlock_kick;
|
||||
}
|
||||
|
||||
/*
|
||||
* While the jump_label init code needs to happend _after_ the jump labels are
|
||||
* enabled and before SMP is started. Hence we use pre-SMP initcall level
|
||||
* init. We cannot do it in xen_init_spinlocks as that is done before
|
||||
* jump labels are activated.
|
||||
*/
|
||||
static __init int xen_init_spinlocks_jump(void)
|
||||
{
|
||||
if (!xen_pvspin)
|
||||
return 0;
|
||||
|
||||
static_key_slow_inc(¶virt_ticketlocks_enabled);
|
||||
return 0;
|
||||
}
|
||||
early_initcall(xen_init_spinlocks_jump);
|
||||
|
||||
static __init int xen_parse_nopvspin(char *arg)
|
||||
{
|
||||
xen_pvspin = false;
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
#include <linux/ipmi.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/pnp.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
MODULE_AUTHOR("Zhao Yakui");
|
||||
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
|
||||
|
@ -57,7 +58,7 @@ struct acpi_ipmi_device {
|
|||
struct list_head head;
|
||||
/* the IPMI request message list */
|
||||
struct list_head tx_msg_list;
|
||||
struct mutex tx_msg_lock;
|
||||
spinlock_t tx_msg_lock;
|
||||
acpi_handle handle;
|
||||
struct pnp_dev *pnp_dev;
|
||||
ipmi_user_t user_interface;
|
||||
|
@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
|
|||
struct kernel_ipmi_msg *msg;
|
||||
struct acpi_ipmi_buffer *buffer;
|
||||
struct acpi_ipmi_device *device;
|
||||
unsigned long flags;
|
||||
|
||||
msg = &tx_msg->tx_message;
|
||||
/*
|
||||
|
@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
|
|||
|
||||
/* Get the msgid */
|
||||
device = tx_msg->device;
|
||||
mutex_lock(&device->tx_msg_lock);
|
||||
spin_lock_irqsave(&device->tx_msg_lock, flags);
|
||||
device->curr_msgid++;
|
||||
tx_msg->tx_msgid = device->curr_msgid;
|
||||
mutex_unlock(&device->tx_msg_lock);
|
||||
spin_unlock_irqrestore(&device->tx_msg_lock, flags);
|
||||
}
|
||||
|
||||
static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
|
||||
|
@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
|
|||
int msg_found = 0;
|
||||
struct acpi_ipmi_msg *tx_msg;
|
||||
struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
|
||||
unsigned long flags;
|
||||
|
||||
if (msg->user != ipmi_device->user_interface) {
|
||||
dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
|
||||
|
@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
|
|||
ipmi_free_recv_msg(msg);
|
||||
return;
|
||||
}
|
||||
mutex_lock(&ipmi_device->tx_msg_lock);
|
||||
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
|
||||
list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
|
||||
if (msg->msgid == tx_msg->tx_msgid) {
|
||||
msg_found = 1;
|
||||
|
@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
|
|||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&ipmi_device->tx_msg_lock);
|
||||
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
|
||||
if (!msg_found) {
|
||||
dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
|
||||
"returned.\n", msg->msgid);
|
||||
|
@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
|
|||
struct acpi_ipmi_device *ipmi_device = handler_context;
|
||||
int err, rem_time;
|
||||
acpi_status status;
|
||||
unsigned long flags;
|
||||
/*
|
||||
* IPMI opregion message.
|
||||
* IPMI message is firstly written to the BMC and system software
|
||||
|
@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
|
|||
return AE_NO_MEMORY;
|
||||
|
||||
acpi_format_ipmi_msg(tx_msg, address, value);
|
||||
mutex_lock(&ipmi_device->tx_msg_lock);
|
||||
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
|
||||
list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
|
||||
mutex_unlock(&ipmi_device->tx_msg_lock);
|
||||
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
|
||||
err = ipmi_request_settime(ipmi_device->user_interface,
|
||||
&tx_msg->addr,
|
||||
tx_msg->tx_msgid,
|
||||
|
@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
|
|||
status = AE_OK;
|
||||
|
||||
end_label:
|
||||
mutex_lock(&ipmi_device->tx_msg_lock);
|
||||
spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
|
||||
list_del(&tx_msg->head);
|
||||
mutex_unlock(&ipmi_device->tx_msg_lock);
|
||||
spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
|
||||
kfree(tx_msg);
|
||||
return status;
|
||||
}
|
||||
|
@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
|
|||
|
||||
INIT_LIST_HEAD(&ipmi_device->head);
|
||||
|
||||
mutex_init(&ipmi_device->tx_msg_lock);
|
||||
spin_lock_init(&ipmi_device->tx_msg_lock);
|
||||
INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
|
||||
ipmi_install_space_handler(ipmi_device);
|
||||
|
||||
|
|
|
@ -1121,7 +1121,7 @@ int acpi_bus_register_driver(struct acpi_driver *driver)
|
|||
EXPORT_SYMBOL(acpi_bus_register_driver);
|
||||
|
||||
/**
|
||||
* acpi_bus_unregister_driver - unregisters a driver with the APIC bus
|
||||
* acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
|
||||
* @driver: driver to unregister
|
||||
*
|
||||
* Unregisters a driver with the ACPI bus. Searches the namespace for all
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
* sata_promise.c - Promise SATA
|
||||
*
|
||||
* Maintained by: Tejun Heo <tj@kernel.org>
|
||||
* Mikael Pettersson <mikpe@it.uu.se>
|
||||
* Mikael Pettersson
|
||||
* Please ALWAYS copy linux-ide@vger.kernel.org
|
||||
* on emails.
|
||||
*
|
||||
|
|
|
@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(device_move);
|
|||
*/
|
||||
void device_shutdown(void)
|
||||
{
|
||||
struct device *dev;
|
||||
struct device *dev, *parent;
|
||||
|
||||
spin_lock(&devices_kset->list_lock);
|
||||
/*
|
||||
|
@ -2034,7 +2034,7 @@ void device_shutdown(void)
|
|||
* prevent it from being freed because parent's
|
||||
* lock is to be held
|
||||
*/
|
||||
get_device(dev->parent);
|
||||
parent = get_device(dev->parent);
|
||||
get_device(dev);
|
||||
/*
|
||||
* Make sure the device is off the kset list, in the
|
||||
|
@ -2044,8 +2044,8 @@ void device_shutdown(void)
|
|||
spin_unlock(&devices_kset->list_lock);
|
||||
|
||||
/* hold lock to avoid race with probe/release */
|
||||
if (dev->parent)
|
||||
device_lock(dev->parent);
|
||||
if (parent)
|
||||
device_lock(parent);
|
||||
device_lock(dev);
|
||||
|
||||
/* Don't allow any more runtime suspends */
|
||||
|
@ -2063,11 +2063,11 @@ void device_shutdown(void)
|
|||
}
|
||||
|
||||
device_unlock(dev);
|
||||
if (dev->parent)
|
||||
device_unlock(dev->parent);
|
||||
if (parent)
|
||||
device_unlock(parent);
|
||||
|
||||
put_device(dev);
|
||||
put_device(dev->parent);
|
||||
put_device(parent);
|
||||
|
||||
spin_lock(&devices_kset->list_lock);
|
||||
}
|
||||
|
|
|
@ -1189,6 +1189,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
|
|||
int err;
|
||||
u32 cp;
|
||||
|
||||
memset(&arg64, 0, sizeof(arg64));
|
||||
err = 0;
|
||||
err |=
|
||||
copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
|
||||
|
|
|
@ -1193,6 +1193,7 @@ static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned in
|
|||
ida_pci_info_struct pciinfo;
|
||||
|
||||
if (!arg) return -EINVAL;
|
||||
memset(&pciinfo, 0, sizeof(pciinfo));
|
||||
pciinfo.bus = host->pci_dev->bus->number;
|
||||
pciinfo.dev_fn = host->pci_dev->devfn;
|
||||
pciinfo.board_id = host->board_id;
|
||||
|
|
|
@ -142,32 +142,6 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
|
|||
return length;
|
||||
}
|
||||
|
||||
ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct tpm_chip *chip = dev_get_drvdata(dev);
|
||||
struct tpm_private *priv = TPM_VPRIV(chip);
|
||||
u8 locality = priv->shr->locality;
|
||||
|
||||
return sprintf(buf, "%d\n", locality);
|
||||
}
|
||||
|
||||
ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t len)
|
||||
{
|
||||
struct tpm_chip *chip = dev_get_drvdata(dev);
|
||||
struct tpm_private *priv = TPM_VPRIV(chip);
|
||||
u8 val;
|
||||
|
||||
int rv = kstrtou8(buf, 0, &val);
|
||||
if (rv)
|
||||
return rv;
|
||||
|
||||
priv->shr->locality = val;
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static const struct file_operations vtpm_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.llseek = no_llseek,
|
||||
|
@ -188,8 +162,6 @@ static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
|
|||
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
|
||||
static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
|
||||
static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
|
||||
static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
|
||||
tpm_store_locality);
|
||||
|
||||
static struct attribute *vtpm_attrs[] = {
|
||||
&dev_attr_pubek.attr,
|
||||
|
@ -202,7 +174,6 @@ static struct attribute *vtpm_attrs[] = {
|
|||
&dev_attr_cancel.attr,
|
||||
&dev_attr_durations.attr,
|
||||
&dev_attr_timeouts.attr,
|
||||
&dev_attr_locality.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
|
@ -210,8 +181,6 @@ static struct attribute_group vtpm_attr_grp = {
|
|||
.attrs = vtpm_attrs,
|
||||
};
|
||||
|
||||
#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
|
||||
|
||||
static const struct tpm_vendor_specific tpm_vtpm = {
|
||||
.status = vtpm_status,
|
||||
.recv = vtpm_recv,
|
||||
|
@ -224,11 +193,6 @@ static const struct tpm_vendor_specific tpm_vtpm = {
|
|||
.miscdev = {
|
||||
.fops = &vtpm_ops,
|
||||
},
|
||||
.duration = {
|
||||
TPM_LONG_TIMEOUT,
|
||||
TPM_LONG_TIMEOUT,
|
||||
TPM_LONG_TIMEOUT,
|
||||
},
|
||||
};
|
||||
|
||||
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
|
||||
|
|
|
@ -26,6 +26,7 @@ config DW_APB_TIMER_OF
|
|||
|
||||
config ARMADA_370_XP_TIMER
|
||||
bool
|
||||
select CLKSRC_OF
|
||||
|
||||
config ORION_TIMER
|
||||
select CLKSRC_OF
|
||||
|
|
|
@ -30,6 +30,9 @@ void __init clocksource_of_init(void)
|
|||
clocksource_of_init_fn init_func;
|
||||
|
||||
for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
|
||||
if (!of_device_is_available(np))
|
||||
continue;
|
||||
|
||||
init_func = match->data;
|
||||
init_func(np);
|
||||
}
|
||||
|
|
|
@ -301,7 +301,7 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
|
|||
ced->name = dev_name(&p->pdev->dev);
|
||||
ced->features = CLOCK_EVT_FEAT_ONESHOT;
|
||||
ced->rating = 200;
|
||||
ced->cpumask = cpumask_of(0);
|
||||
ced->cpumask = cpu_possible_mask;
|
||||
ced->set_next_event = em_sti_clock_event_next;
|
||||
ced->set_mode = em_sti_clock_event_mode;
|
||||
|
||||
|
|
|
@ -428,7 +428,6 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
|
|||
evt->irq);
|
||||
return -EIO;
|
||||
}
|
||||
irq_set_affinity(evt->irq, cpumask_of(cpu));
|
||||
} else {
|
||||
enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
|
||||
}
|
||||
|
@ -449,6 +448,7 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
|
|||
unsigned long action, void *hcpu)
|
||||
{
|
||||
struct mct_clock_event_device *mevt;
|
||||
unsigned int cpu;
|
||||
|
||||
/*
|
||||
* Grab cpu pointer in each case to avoid spurious
|
||||
|
@ -459,6 +459,12 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self,
|
|||
mevt = this_cpu_ptr(&percpu_mct_tick);
|
||||
exynos4_local_timer_setup(&mevt->evt);
|
||||
break;
|
||||
case CPU_ONLINE:
|
||||
cpu = (unsigned long)hcpu;
|
||||
if (mct_int_type == MCT_INT_SPI)
|
||||
irq_set_affinity(mct_irqs[MCT_L0_IRQ + cpu],
|
||||
cpumask_of(cpu));
|
||||
break;
|
||||
case CPU_DYING:
|
||||
mevt = this_cpu_ptr(&percpu_mct_tick);
|
||||
exynos4_local_timer_stop(&mevt->evt);
|
||||
|
@ -500,6 +506,8 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
|
|||
&percpu_mct_tick);
|
||||
WARN(err, "MCT: can't request IRQ %d (%d)\n",
|
||||
mct_irqs[MCT_L0_IRQ], err);
|
||||
} else {
|
||||
irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
|
||||
}
|
||||
|
||||
err = register_cpu_notifier(&exynos4_mct_cpu_nb);
|
||||
|
|
|
@ -986,6 +986,10 @@ static int __init acpi_cpufreq_init(void)
|
|||
{
|
||||
int ret;
|
||||
|
||||
/* don't keep reloading if cpufreq_driver exists */
|
||||
if (cpufreq_get_current_driver())
|
||||
return 0;
|
||||
|
||||
if (acpi_disabled)
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -1460,6 +1460,9 @@ unsigned int cpufreq_get(unsigned int cpu)
|
|||
{
|
||||
unsigned int ret_freq = 0;
|
||||
|
||||
if (cpufreq_disabled() || !cpufreq_driver)
|
||||
return -ENOENT;
|
||||
|
||||
if (!down_read_trylock(&cpufreq_rwsem))
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -457,7 +457,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
|
|||
opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
|
||||
err_put_node:
|
||||
of_node_put(np);
|
||||
dev_err(dvfs_info->dev, "%s: failed initialization\n", __func__);
|
||||
dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -707,8 +707,7 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
|
|||
reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
|
||||
break;
|
||||
case DRM_MODE_DPMS_OFF:
|
||||
/* disable audio and video ports */
|
||||
reg_write(encoder, REG_ENA_AP, 0x00);
|
||||
/* disable video ports */
|
||||
reg_write(encoder, REG_ENA_VP_0, 0x00);
|
||||
reg_write(encoder, REG_ENA_VP_1, 0x00);
|
||||
reg_write(encoder, REG_ENA_VP_2, 0x00);
|
||||
|
|
|
@ -4800,10 +4800,10 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
|
|||
|
||||
if (!mutex_trylock(&dev->struct_mutex)) {
|
||||
if (!mutex_is_locked_by(&dev->struct_mutex, current))
|
||||
return SHRINK_STOP;
|
||||
return 0;
|
||||
|
||||
if (dev_priv->mm.shrinker_no_lock_stealing)
|
||||
return SHRINK_STOP;
|
||||
return 0;
|
||||
|
||||
unlock = false;
|
||||
}
|
||||
|
@ -4901,10 +4901,10 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
|
|||
|
||||
if (!mutex_trylock(&dev->struct_mutex)) {
|
||||
if (!mutex_is_locked_by(&dev->struct_mutex, current))
|
||||
return 0;
|
||||
return SHRINK_STOP;
|
||||
|
||||
if (dev_priv->mm.shrinker_no_lock_stealing)
|
||||
return 0;
|
||||
return SHRINK_STOP;
|
||||
|
||||
unlock = false;
|
||||
}
|
||||
|
|
|
@ -143,8 +143,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
|
|||
|
||||
/* Seek the first printf which is hits start position */
|
||||
if (e->pos < e->start) {
|
||||
len = vsnprintf(NULL, 0, f, args);
|
||||
if (!__i915_error_seek(e, len))
|
||||
va_list tmp;
|
||||
|
||||
va_copy(tmp, args);
|
||||
if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -4775,6 +4775,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
|
|||
|
||||
pipeconf = 0;
|
||||
|
||||
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
|
||||
I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
|
||||
pipeconf |= PIPECONF_ENABLE;
|
||||
|
||||
if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
|
||||
/* Enable pixel doubling when the dot clock is > 90% of the (display)
|
||||
* core speed.
|
||||
|
|
|
@ -588,7 +588,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
|
|||
DRM_DEBUG_KMS("aux_ch native nack\n");
|
||||
return -EREMOTEIO;
|
||||
case AUX_NATIVE_REPLY_DEFER:
|
||||
udelay(100);
|
||||
/*
|
||||
* For now, just give more slack to branch devices. We
|
||||
* could check the DPCD for I2C bit rate capabilities,
|
||||
* and if available, adjust the interval. We could also
|
||||
* be more careful with DP-to-Legacy adapters where a
|
||||
* long legacy cable may force very low I2C bit rates.
|
||||
*/
|
||||
if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
|
||||
DP_DWN_STRM_PORT_PRESENT)
|
||||
usleep_range(500, 600);
|
||||
else
|
||||
usleep_range(300, 400);
|
||||
continue;
|
||||
default:
|
||||
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
|
||||
|
|
|
@ -916,6 +916,14 @@ intel_tv_compute_config(struct intel_encoder *encoder,
|
|||
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
|
||||
pipe_config->pipe_bpp = 8*3;
|
||||
|
||||
/* TV has it's own notion of sync and other mode flags, so clear them. */
|
||||
pipe_config->adjusted_mode.flags = 0;
|
||||
|
||||
/*
|
||||
* FIXME: We don't check whether the input mode is actually what we want
|
||||
* or whether userspace is doing something stupid.
|
||||
*/
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,8 +19,6 @@
|
|||
#include "msm_drv.h"
|
||||
#include "mdp4_kms.h"
|
||||
|
||||
#include <mach/iommu.h>
|
||||
|
||||
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
|
||||
|
||||
static int mdp4_hw_init(struct msm_kms *kms)
|
||||
|
|
|
@ -18,8 +18,6 @@
|
|||
#include "msm_drv.h"
|
||||
#include "msm_gpu.h"
|
||||
|
||||
#include <mach/iommu.h>
|
||||
|
||||
static void msm_fb_output_poll_changed(struct drm_device *dev)
|
||||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
|
@ -62,6 +60,8 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
|
|||
int i, ret;
|
||||
|
||||
for (i = 0; i < cnt; i++) {
|
||||
/* TODO maybe some day msm iommu won't require this hack: */
|
||||
struct device *msm_iommu_get_ctx(const char *ctx_name);
|
||||
struct device *ctx = msm_iommu_get_ctx(names[i]);
|
||||
if (!ctx)
|
||||
continue;
|
||||
|
@ -199,7 +199,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
|
|||
* imx drm driver on iMX5
|
||||
*/
|
||||
dev_err(dev->dev, "failed to load kms\n");
|
||||
ret = PTR_ERR(priv->kms);
|
||||
ret = PTR_ERR(kms);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
@ -697,7 +697,7 @@ static struct drm_driver msm_driver = {
|
|||
.gem_vm_ops = &vm_ops,
|
||||
.dumb_create = msm_gem_dumb_create,
|
||||
.dumb_map_offset = msm_gem_dumb_map_offset,
|
||||
.dumb_destroy = msm_gem_dumb_destroy,
|
||||
.dumb_destroy = drm_gem_dumb_destroy,
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
.debugfs_init = msm_debugfs_init,
|
||||
.debugfs_cleanup = msm_debugfs_cleanup,
|
||||
|
|
|
@ -319,13 +319,6 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
|
|||
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
|
||||
}
|
||||
|
||||
int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
|
||||
uint32_t handle)
|
||||
{
|
||||
/* No special work needed, drop the reference and see what falls out */
|
||||
return drm_gem_handle_delete(file, handle);
|
||||
}
|
||||
|
||||
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset)
|
||||
{
|
||||
|
|
|
@ -1168,6 +1168,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
|
|||
{ 25000, 30000, RADEON_SCLK_UP }
|
||||
};
|
||||
|
||||
void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
|
||||
u32 *max_clock)
|
||||
{
|
||||
u32 i, clock = 0;
|
||||
|
||||
if ((table == NULL) || (table->count == 0)) {
|
||||
*max_clock = clock;
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < table->count; i++) {
|
||||
if (clock < table->entries[i].clk)
|
||||
clock = table->entries[i].clk;
|
||||
}
|
||||
*max_clock = clock;
|
||||
}
|
||||
|
||||
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
|
||||
u32 clock, u16 max_voltage, u16 *voltage)
|
||||
{
|
||||
|
@ -2080,6 +2097,7 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
bool disable_mclk_switching;
|
||||
u32 mclk, sclk;
|
||||
u16 vddc, vddci;
|
||||
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
|
||||
|
||||
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
|
||||
btc_dpm_vblank_too_short(rdev))
|
||||
|
@ -2121,6 +2139,39 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
ps->low.vddci = max_limits->vddci;
|
||||
}
|
||||
|
||||
/* limit clocks to max supported clocks based on voltage dependency tables */
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
|
||||
&max_sclk_vddc);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
|
||||
&max_mclk_vddci);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
|
||||
&max_mclk_vddc);
|
||||
|
||||
if (max_sclk_vddc) {
|
||||
if (ps->low.sclk > max_sclk_vddc)
|
||||
ps->low.sclk = max_sclk_vddc;
|
||||
if (ps->medium.sclk > max_sclk_vddc)
|
||||
ps->medium.sclk = max_sclk_vddc;
|
||||
if (ps->high.sclk > max_sclk_vddc)
|
||||
ps->high.sclk = max_sclk_vddc;
|
||||
}
|
||||
if (max_mclk_vddci) {
|
||||
if (ps->low.mclk > max_mclk_vddci)
|
||||
ps->low.mclk = max_mclk_vddci;
|
||||
if (ps->medium.mclk > max_mclk_vddci)
|
||||
ps->medium.mclk = max_mclk_vddci;
|
||||
if (ps->high.mclk > max_mclk_vddci)
|
||||
ps->high.mclk = max_mclk_vddci;
|
||||
}
|
||||
if (max_mclk_vddc) {
|
||||
if (ps->low.mclk > max_mclk_vddc)
|
||||
ps->low.mclk = max_mclk_vddc;
|
||||
if (ps->medium.mclk > max_mclk_vddc)
|
||||
ps->medium.mclk = max_mclk_vddc;
|
||||
if (ps->high.mclk > max_mclk_vddc)
|
||||
ps->high.mclk = max_mclk_vddc;
|
||||
}
|
||||
|
||||
/* XXX validate the min clocks required for display */
|
||||
|
||||
if (disable_mclk_switching) {
|
||||
|
|
|
@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
|
|||
struct rv7xx_pl *pl);
|
||||
void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
|
||||
u32 clock, u16 max_voltage, u16 *voltage);
|
||||
void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
|
||||
u32 *max_clock);
|
||||
void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
|
||||
u16 max_vddc, u16 max_vddci,
|
||||
u16 *vddc, u16 *vddci);
|
||||
|
|
|
@ -146,6 +146,8 @@ static const struct ci_pt_config_reg didt_config_ci[] =
|
|||
};
|
||||
|
||||
extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
|
||||
extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
|
||||
u32 *max_clock);
|
||||
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
|
||||
u32 arb_freq_src, u32 arb_freq_dest);
|
||||
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
|
||||
|
@ -712,6 +714,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
struct radeon_clock_and_voltage_limits *max_limits;
|
||||
bool disable_mclk_switching;
|
||||
u32 sclk, mclk;
|
||||
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
|
||||
int i;
|
||||
|
||||
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
|
||||
|
@ -739,6 +742,29 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
}
|
||||
}
|
||||
|
||||
/* limit clocks to max supported clocks based on voltage dependency tables */
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
|
||||
&max_sclk_vddc);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
|
||||
&max_mclk_vddci);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
|
||||
&max_mclk_vddc);
|
||||
|
||||
for (i = 0; i < ps->performance_level_count; i++) {
|
||||
if (max_sclk_vddc) {
|
||||
if (ps->performance_levels[i].sclk > max_sclk_vddc)
|
||||
ps->performance_levels[i].sclk = max_sclk_vddc;
|
||||
}
|
||||
if (max_mclk_vddci) {
|
||||
if (ps->performance_levels[i].mclk > max_mclk_vddci)
|
||||
ps->performance_levels[i].mclk = max_mclk_vddci;
|
||||
}
|
||||
if (max_mclk_vddc) {
|
||||
if (ps->performance_levels[i].mclk > max_mclk_vddc)
|
||||
ps->performance_levels[i].mclk = max_mclk_vddc;
|
||||
}
|
||||
}
|
||||
|
||||
/* XXX validate the min clocks required for display */
|
||||
|
||||
if (disable_mclk_switching) {
|
||||
|
|
|
@ -2845,10 +2845,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
|
|||
rdev->config.cik.tile_config |= (3 << 0);
|
||||
break;
|
||||
}
|
||||
if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
|
||||
rdev->config.cik.tile_config |= 1 << 4;
|
||||
else
|
||||
rdev->config.cik.tile_config |= 0 << 4;
|
||||
rdev->config.cik.tile_config |=
|
||||
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
|
||||
rdev->config.cik.tile_config |=
|
||||
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
|
||||
rdev->config.cik.tile_config |=
|
||||
|
@ -4456,8 +4454,8 @@ static int cik_mc_init(struct radeon_device *rdev)
|
|||
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
|
||||
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
|
||||
/* size in MB on si */
|
||||
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
|
||||
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
|
||||
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
|
||||
rdev->mc.visible_vram_size = rdev->mc.aper_size;
|
||||
si_vram_gtt_location(rdev, &rdev->mc);
|
||||
radeon_update_bandwidth_info(rdev);
|
||||
|
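The 1024ULL change in cik_mc_init above is an integer-width fix: the register read yields a 32-bit value, and multiplying it by plain 1024 * 1024 happens in 32-bit arithmetic (with the usual 32-bit int) before the result is stored in the 64-bit VRAM-size field, so boards with 4 GiB or more of memory wrap to a tiny size. A stand-alone illustration; the 8 GiB figure is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t size_mb = 8192;                       /* pretend an 8 GiB board */

	uint64_t wrong = size_mb * 1024 * 1024;        /* 32-bit multiply, wraps to 0 */
	uint64_t right = size_mb * 1024ULL * 1024ULL;  /* widened before multiplying */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}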
@ -4735,12 +4733,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
|
|||
u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
|
||||
u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
|
||||
u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
|
||||
char *block = (char *)&mc_client;
|
||||
char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
|
||||
(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
|
||||
|
||||
printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
|
||||
printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
|
||||
protections, vmid, addr,
|
||||
(status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
|
||||
block, mc_id);
|
||||
block, mc_client, mc_id);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
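The cik_vm_decode_fault hunk above replaces a cast of the mc_client register value, whose byte layout depends on host endianness, with explicit shifts that pull out the four ASCII characters of the client tag in a fixed order. The same technique in stand-alone form; the sample register value is invented:

#include <stdint.h>
#include <stdio.h>

/* Unpack four ASCII characters from a 32-bit register value into a
 * NUL-terminated string, independent of host byte order. */
static void decode_tag(uint32_t v, char out[5])
{
	out[0] = (v >> 24) & 0xff;
	out[1] = (v >> 16) & 0xff;
	out[2] = (v >> 8) & 0xff;
	out[3] = v & 0xff;
	out[4] = '\0';
}

int main(void)
{
	char tag[5];

	decode_tag(0x43425530u, tag);   /* "CBU0" -- made-up example value */
	printf("client '%s'\n", tag);
	return 0;
}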
@ -787,6 +787,7 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
bool disable_mclk_switching;
|
||||
u32 mclk, sclk;
|
||||
u16 vddc, vddci;
|
||||
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
|
||||
int i;
|
||||
|
||||
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
|
||||
|
@ -813,6 +814,29 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
}
|
||||
}
|
||||
|
||||
/* limit clocks to max supported clocks based on voltage dependency tables */
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
|
||||
&max_sclk_vddc);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
|
||||
&max_mclk_vddci);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
|
||||
&max_mclk_vddc);
|
||||
|
||||
for (i = 0; i < ps->performance_level_count; i++) {
|
||||
if (max_sclk_vddc) {
|
||||
if (ps->performance_levels[i].sclk > max_sclk_vddc)
|
||||
ps->performance_levels[i].sclk = max_sclk_vddc;
|
||||
}
|
||||
if (max_mclk_vddci) {
|
||||
if (ps->performance_levels[i].mclk > max_mclk_vddci)
|
||||
ps->performance_levels[i].mclk = max_mclk_vddci;
|
||||
}
|
||||
if (max_mclk_vddc) {
|
||||
if (ps->performance_levels[i].mclk > max_mclk_vddc)
|
||||
ps->performance_levels[i].mclk = max_mclk_vddc;
|
||||
}
|
||||
}
|
||||
|
||||
/* XXX validate the min clocks required for display */
|
||||
|
||||
if (disable_mclk_switching) {
|
||||
|
|
|
@ -2933,9 +2933,11 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
|
|||
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
|
||||
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
|
||||
seq_printf(m, "%u dwords in ring\n", count);
|
||||
for (j = 0; j <= count; j++) {
|
||||
i = (rdp + j) & ring->ptr_mask;
|
||||
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
|
||||
if (ring->ready) {
|
||||
for (j = 0; j <= count; j++) {
|
||||
i = (rdp + j) & ring->ptr_mask;
|
||||
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1084,7 +1084,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
|
|||
rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
|
||||
le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
|
||||
rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
|
||||
le16_to_cpu(limits->entries[i].usVoltage);
|
||||
le16_to_cpu(entry->usVoltage);
|
||||
entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
|
||||
((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
|
||||
}
|
||||
|
|
|
@ -257,10 +257,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
|
|||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
if (ASIC_IS_DCE3(rdev)) {
|
||||
/* according to the reg specs, this should DCE3.2 only, but in
|
||||
* practice it seems to cover DCE3.0 as well.
|
||||
*/
|
||||
if (ASIC_IS_DCE32(rdev)) {
|
||||
if (dig->dig_encoder == 0) {
|
||||
dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
|
||||
dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
|
||||
|
@ -276,8 +273,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
|
|||
WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
|
||||
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
|
||||
}
|
||||
} else if (ASIC_IS_DCE3(rdev)) {
|
||||
/* according to the reg specs, this should DCE3.2 only, but in
|
||||
* practice it seems to cover DCE3.0/3.1 as well.
|
||||
*/
|
||||
if (dig->dig_encoder == 0) {
|
||||
WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
|
||||
WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
|
||||
WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
|
||||
} else {
|
||||
WREG32(DCCG_AUDIO_DTO1_PHASE, base_rate * 100);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
|
||||
WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
|
||||
}
|
||||
} else {
|
||||
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
|
||||
/* according to the reg specs, this should be DCE2.0 and DCE3.0/3.1 */
|
||||
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
|
||||
AUDIO_DTO_MODULE(clock / 10));
|
||||
}
|
||||
|
|
|
@ -1004,6 +1004,8 @@ static struct radeon_asic rv6xx_asic = {
|
|||
.wait_for_vblank = &avivo_wait_for_vblank,
|
||||
.set_backlight_level = &atombios_set_backlight_level,
|
||||
.get_backlight_level = &atombios_get_backlight_level,
|
||||
.hdmi_enable = &r600_hdmi_enable,
|
||||
.hdmi_setmode = &r600_hdmi_setmode,
|
||||
},
|
||||
.copy = {
|
||||
.blit = &r600_copy_cpdma,
|
||||
|
|
|
@ -1367,6 +1367,7 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
|
|||
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
|
||||
uint16_t data_offset, size;
|
||||
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
|
||||
struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
|
||||
uint8_t frev, crev;
|
||||
int i, num_indices;
|
||||
|
||||
|
@ -1378,18 +1379,21 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
|
|||
|
||||
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
|
||||
sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
|
||||
|
||||
ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
|
||||
((u8 *)&ss_info->asSS_Info[0]);
|
||||
for (i = 0; i < num_indices; i++) {
|
||||
if (ss_info->asSS_Info[i].ucSS_Id == id) {
|
||||
if (ss_assign->ucSS_Id == id) {
|
||||
ss->percentage =
|
||||
le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
|
||||
ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
|
||||
ss->step = ss_info->asSS_Info[i].ucSS_Step;
|
||||
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
|
||||
ss->range = ss_info->asSS_Info[i].ucSS_Range;
|
||||
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
|
||||
le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
|
||||
ss->type = ss_assign->ucSpreadSpectrumType;
|
||||
ss->step = ss_assign->ucSS_Step;
|
||||
ss->delay = ss_assign->ucSS_Delay;
|
||||
ss->range = ss_assign->ucSS_Range;
|
||||
ss->refdiv = ss_assign->ucRecommendedRef_Div;
|
||||
return true;
|
||||
}
|
||||
ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
|
||||
((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
|
||||
}
|
||||
}
|
||||
return false;
|
||||
|
@ -1477,6 +1481,12 @@ union asic_ss_info {
|
|||
struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
|
||||
};
|
||||
|
||||
union asic_ss_assignment {
|
||||
struct _ATOM_ASIC_SS_ASSIGNMENT v1;
|
||||
struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
|
||||
struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
|
||||
};
|
||||
|
||||
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
|
||||
struct radeon_atom_ss *ss,
|
||||
int id, u32 clock)
|
||||
|
@ -1485,6 +1495,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
|
|||
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
|
||||
uint16_t data_offset, size;
|
||||
union asic_ss_info *ss_info;
|
||||
union asic_ss_assignment *ss_assign;
|
||||
uint8_t frev, crev;
|
||||
int i, num_indices;
|
||||
|
||||
|
@ -1509,45 +1520,52 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
|
|||
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
|
||||
sizeof(ATOM_ASIC_SS_ASSIGNMENT);
|
||||
|
||||
ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
|
||||
for (i = 0; i < num_indices; i++) {
|
||||
if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
|
||||
(clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
|
||||
if ((ss_assign->v1.ucClockIndication == id) &&
|
||||
(clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
|
||||
ss->percentage =
|
||||
le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
|
||||
ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
|
||||
ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
|
||||
le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
|
||||
ss->type = ss_assign->v1.ucSpreadSpectrumMode;
|
||||
ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
|
||||
return true;
|
||||
}
|
||||
ss_assign = (union asic_ss_assignment *)
|
||||
((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
|
||||
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
|
||||
ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
|
||||
for (i = 0; i < num_indices; i++) {
|
||||
if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
|
||||
(clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
|
||||
if ((ss_assign->v2.ucClockIndication == id) &&
|
||||
(clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
|
||||
ss->percentage =
|
||||
le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
|
||||
ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
|
||||
ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
|
||||
le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
|
||||
ss->type = ss_assign->v2.ucSpreadSpectrumMode;
|
||||
ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
|
||||
if ((crev == 2) &&
|
||||
((id == ASIC_INTERNAL_ENGINE_SS) ||
|
||||
(id == ASIC_INTERNAL_MEMORY_SS)))
|
||||
ss->rate /= 100;
|
||||
return true;
|
||||
}
|
||||
ss_assign = (union asic_ss_assignment *)
|
||||
((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
|
||||
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
|
||||
ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
|
||||
for (i = 0; i < num_indices; i++) {
|
||||
if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
|
||||
(clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
|
||||
if ((ss_assign->v3.ucClockIndication == id) &&
|
||||
(clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
|
||||
ss->percentage =
|
||||
le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
|
||||
ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
|
||||
ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
|
||||
le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
|
||||
ss->type = ss_assign->v3.ucSpreadSpectrumMode;
|
||||
ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
|
||||
if ((id == ASIC_INTERNAL_ENGINE_SS) ||
|
||||
(id == ASIC_INTERNAL_MEMORY_SS))
|
||||
ss->rate /= 100;
|
||||
|
@ -1555,6 +1573,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
|
|||
radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
|
||||
return true;
|
||||
}
|
||||
ss_assign = (union asic_ss_assignment *)
|
||||
((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -85,8 +85,9 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
|
|||
VRAM, also but everything into VRAM on AGP cards to avoid
|
||||
image corruptions */
|
||||
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
|
||||
(i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
|
||||
/* TODO: is this still needed for NI+ ? */
|
||||
p->rdev->family < CHIP_PALM &&
|
||||
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
|
||||
|
||||
p->relocs[i].lobj.domain =
|
||||
RADEON_GEM_DOMAIN_VRAM;
|
||||
|
||||
|
|
|
@ -1320,13 +1320,22 @@ int radeon_device_init(struct radeon_device *rdev,
|
|||
return r;
|
||||
}
|
||||
if ((radeon_testing & 1)) {
|
||||
radeon_test_moves(rdev);
|
||||
if (rdev->accel_working)
|
||||
radeon_test_moves(rdev);
|
||||
else
|
||||
DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
|
||||
}
|
||||
if ((radeon_testing & 2)) {
|
||||
radeon_test_syncing(rdev);
|
||||
if (rdev->accel_working)
|
||||
radeon_test_syncing(rdev);
|
||||
else
|
||||
DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
|
||||
}
|
||||
if (radeon_benchmarking) {
|
||||
radeon_benchmark(rdev, radeon_benchmarking);
|
||||
if (rdev->accel_working)
|
||||
radeon_benchmark(rdev, radeon_benchmarking);
|
||||
else
|
||||
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -1002,7 +1002,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
|
|||
{
|
||||
/* set up the default clocks if the MC ucode is loaded */
|
||||
if ((rdev->family >= CHIP_BARTS) &&
|
||||
(rdev->family <= CHIP_HAINAN) &&
|
||||
(rdev->family <= CHIP_CAYMAN) &&
|
||||
rdev->mc_fw) {
|
||||
if (rdev->pm.default_vddc)
|
||||
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
|
||||
|
@ -1046,7 +1046,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
|
|||
if (ret) {
|
||||
DRM_ERROR("radeon: dpm resume failed\n");
|
||||
if ((rdev->family >= CHIP_BARTS) &&
|
||||
(rdev->family <= CHIP_HAINAN) &&
|
||||
(rdev->family <= CHIP_CAYMAN) &&
|
||||
rdev->mc_fw) {
|
||||
if (rdev->pm.default_vddc)
|
||||
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
|
||||
|
@ -1097,7 +1097,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
|
|||
radeon_pm_init_profile(rdev);
|
||||
/* set up the default clocks if the MC ucode is loaded */
|
||||
if ((rdev->family >= CHIP_BARTS) &&
|
||||
(rdev->family <= CHIP_HAINAN) &&
|
||||
(rdev->family <= CHIP_CAYMAN) &&
|
||||
rdev->mc_fw) {
|
||||
if (rdev->pm.default_vddc)
|
||||
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
|
||||
|
@ -1183,7 +1183,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
|
|||
if (ret) {
|
||||
rdev->pm.dpm_enabled = false;
|
||||
if ((rdev->family >= CHIP_BARTS) &&
|
||||
(rdev->family <= CHIP_HAINAN) &&
|
||||
(rdev->family <= CHIP_CAYMAN) &&
|
||||
rdev->mc_fw) {
|
||||
if (rdev->pm.default_vddc)
|
||||
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
|
||||
|
|
|
@ -839,9 +839,11 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
|
|||
* packet that is the root issue
|
||||
*/
|
||||
i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
|
||||
for (j = 0; j <= (count + 32); j++) {
|
||||
seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
|
||||
i = (i + 1) & ring->ptr_mask;
|
||||
if (ring->ready) {
|
||||
for (j = 0; j <= (count + 32); j++) {
|
||||
seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
|
||||
i = (i + 1) & ring->ptr_mask;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -476,8 +476,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* TODO: is this still necessary on NI+ ? */
|
||||
if ((cmd == 0 || cmd == 0x3) &&
|
||||
if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
|
||||
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
|
||||
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
|
||||
start, end);
|
||||
|
|
|
@ -2910,6 +2910,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
bool disable_sclk_switching = false;
|
||||
u32 mclk, sclk;
|
||||
u16 vddc, vddci;
|
||||
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
|
||||
int i;
|
||||
|
||||
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
|
||||
|
@ -2943,6 +2944,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
|
|||
}
|
||||
}
|
||||
|
||||
/* limit clocks to max supported clocks based on voltage dependency tables */
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
|
||||
&max_sclk_vddc);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
|
||||
&max_mclk_vddci);
|
||||
btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
|
||||
&max_mclk_vddc);
|
||||
|
||||
for (i = 0; i < ps->performance_level_count; i++) {
|
||||
if (max_sclk_vddc) {
|
||||
if (ps->performance_levels[i].sclk > max_sclk_vddc)
|
||||
ps->performance_levels[i].sclk = max_sclk_vddc;
|
||||
}
|
||||
if (max_mclk_vddci) {
|
||||
if (ps->performance_levels[i].mclk > max_mclk_vddci)
|
||||
ps->performance_levels[i].mclk = max_mclk_vddci;
|
||||
}
|
||||
if (max_mclk_vddc) {
|
||||
if (ps->performance_levels[i].mclk > max_mclk_vddc)
|
||||
ps->performance_levels[i].mclk = max_mclk_vddc;
|
||||
}
|
||||
}
|
||||
|
||||
/* XXX validate the min clocks required for display */
|
||||
|
||||
if (disable_mclk_switching) {
|
||||
|
|
|
@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
|
|||
/* enable VCPU clock */
|
||||
WREG32(UVD_VCPU_CNTL, 1 << 9);
|
||||
|
||||
/* enable UMC */
|
||||
WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
|
||||
/* enable UMC and NC0 */
|
||||
WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
|
||||
|
||||
/* boot up the VCPU */
|
||||
WREG32(UVD_SOFT_RESET, 0);
|
||||
|
|
|
@ -195,7 +195,7 @@ int vmbus_connect(void)
|
|||
|
||||
do {
|
||||
ret = vmbus_negotiate_version(msginfo, version);
|
||||
if (ret)
|
||||
if (ret == -ETIMEDOUT)
|
||||
goto cleanup;
|
||||
|
||||
if (vmbus_connection.conn_state == CONNECTED)
|
||||
|
|
|
@ -32,13 +32,17 @@
|
|||
/*
|
||||
* Pre win8 version numbers used in ws2008 and ws 2008 r2 (win7)
|
||||
*/
|
||||
#define WS2008_SRV_MAJOR 1
|
||||
#define WS2008_SRV_MINOR 0
|
||||
#define WS2008_SRV_VERSION (WS2008_SRV_MAJOR << 16 | WS2008_SRV_MINOR)
|
||||
|
||||
#define WIN7_SRV_MAJOR 3
|
||||
#define WIN7_SRV_MINOR 0
|
||||
#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
|
||||
#define WIN7_SRV_VERSION (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
|
||||
|
||||
#define WIN8_SRV_MAJOR 4
|
||||
#define WIN8_SRV_MINOR 0
|
||||
#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
|
||||
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
|
||||
|
||||
/*
|
||||
* Global state maintained for transaction that is being processed.
|
||||
|
@ -587,6 +591,8 @@ void hv_kvp_onchannelcallback(void *context)
|
|||
|
||||
struct icmsg_hdr *icmsghdrp;
|
||||
struct icmsg_negotiate *negop = NULL;
|
||||
int util_fw_version;
|
||||
int kvp_srv_version;
|
||||
|
||||
if (kvp_transaction.active) {
|
||||
/*
|
||||
|
@ -606,17 +612,26 @@ void hv_kvp_onchannelcallback(void *context)
|
|||
|
||||
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
|
||||
/*
|
||||
* We start with win8 version and if the host cannot
|
||||
* support that we use the previous version.
|
||||
* Based on the host, select appropriate
|
||||
* framework and service versions we will
|
||||
* negotiate.
|
||||
*/
|
||||
if (vmbus_prep_negotiate_resp(icmsghdrp, negop,
|
||||
recv_buffer, UTIL_FW_MAJOR_MINOR,
|
||||
WIN8_SRV_MAJOR_MINOR))
|
||||
goto done;
|
||||
|
||||
switch (vmbus_proto_version) {
|
||||
case (VERSION_WS2008):
|
||||
util_fw_version = UTIL_WS2K8_FW_VERSION;
|
||||
kvp_srv_version = WS2008_SRV_VERSION;
|
||||
break;
|
||||
case (VERSION_WIN7):
|
||||
util_fw_version = UTIL_FW_VERSION;
|
||||
kvp_srv_version = WIN7_SRV_VERSION;
|
||||
break;
|
||||
default:
|
||||
util_fw_version = UTIL_FW_VERSION;
|
||||
kvp_srv_version = WIN8_SRV_VERSION;
|
||||
}
|
||||
vmbus_prep_negotiate_resp(icmsghdrp, negop,
|
||||
recv_buffer, UTIL_FW_MAJOR_MINOR,
|
||||
WIN7_SRV_MAJOR_MINOR);
|
||||
recv_buffer, util_fw_version,
|
||||
kvp_srv_version);
|
||||
|
||||
} else {
|
||||
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
|
||||
|
@ -649,7 +664,6 @@ void hv_kvp_onchannelcallback(void *context)
|
|||
return;
|
||||
|
||||
}
|
||||
done:
|
||||
|
||||
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
|
||||
| ICMSGHDRFLAG_RESPONSE;
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
|
||||
#define VSS_MAJOR 5
|
||||
#define VSS_MINOR 0
|
||||
#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR)
|
||||
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
|
||||
|
||||
|
||||
|
||||
|
@ -190,8 +190,8 @@ void hv_vss_onchannelcallback(void *context)
|
|||
|
||||
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
|
||||
vmbus_prep_negotiate_resp(icmsghdrp, negop,
|
||||
recv_buffer, UTIL_FW_MAJOR_MINOR,
|
||||
VSS_MAJOR_MINOR);
|
||||
recv_buffer, UTIL_FW_VERSION,
|
||||
VSS_VERSION);
|
||||
} else {
|
||||
vss_msg = (struct hv_vss_msg *)&recv_buffer[
|
||||
sizeof(struct vmbuspipe_hdr) +
|
||||
|
|
|
@ -28,17 +28,32 @@
|
|||
#include <linux/reboot.h>
|
||||
#include <linux/hyperv.h>
|
||||
|
||||
#define SHUTDOWN_MAJOR 3
|
||||
#define SHUTDOWN_MINOR 0
|
||||
#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
|
||||
|
||||
#define TIMESYNCH_MAJOR 3
|
||||
#define TIMESYNCH_MINOR 0
|
||||
#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR)
|
||||
#define SD_MAJOR 3
|
||||
#define SD_MINOR 0
|
||||
#define SD_VERSION (SD_MAJOR << 16 | SD_MINOR)
|
||||
|
||||
#define HEARTBEAT_MAJOR 3
|
||||
#define HEARTBEAT_MINOR 0
|
||||
#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR)
|
||||
#define SD_WS2008_MAJOR 1
|
||||
#define SD_WS2008_VERSION (SD_WS2008_MAJOR << 16 | SD_MINOR)
|
||||
|
||||
#define TS_MAJOR 3
|
||||
#define TS_MINOR 0
|
||||
#define TS_VERSION (TS_MAJOR << 16 | TS_MINOR)
|
||||
|
||||
#define TS_WS2008_MAJOR 1
|
||||
#define TS_WS2008_VERSION (TS_WS2008_MAJOR << 16 | TS_MINOR)
|
||||
|
||||
#define HB_MAJOR 3
|
||||
#define HB_MINOR 0
|
||||
#define HB_VERSION (HB_MAJOR << 16 | HB_MINOR)
|
||||
|
||||
#define HB_WS2008_MAJOR 1
|
||||
#define HB_WS2008_VERSION (HB_WS2008_MAJOR << 16 | HB_MINOR)
|
||||
|
||||
static int sd_srv_version;
|
||||
static int ts_srv_version;
|
||||
static int hb_srv_version;
|
||||
static int util_fw_version;
|
||||
|
||||
static void shutdown_onchannelcallback(void *context);
|
||||
static struct hv_util_service util_shutdown = {
|
||||
|
@ -99,8 +114,8 @@ static void shutdown_onchannelcallback(void *context)
|
|||
|
||||
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
|
||||
vmbus_prep_negotiate_resp(icmsghdrp, negop,
|
||||
shut_txf_buf, UTIL_FW_MAJOR_MINOR,
|
||||
SHUTDOWN_MAJOR_MINOR);
|
||||
shut_txf_buf, util_fw_version,
|
||||
sd_srv_version);
|
||||
} else {
|
||||
shutdown_msg =
|
||||
(struct shutdown_msg_data *)&shut_txf_buf[
|
||||
|
@ -216,6 +231,7 @@ static void timesync_onchannelcallback(void *context)
|
|||
struct icmsg_hdr *icmsghdrp;
|
||||
struct ictimesync_data *timedatap;
|
||||
u8 *time_txf_buf = util_timesynch.recv_buffer;
|
||||
struct icmsg_negotiate *negop = NULL;
|
||||
|
||||
vmbus_recvpacket(channel, time_txf_buf,
|
||||
PAGE_SIZE, &recvlen, &requestid);
|
||||
|
@ -225,9 +241,10 @@ static void timesync_onchannelcallback(void *context)
|
|||
sizeof(struct vmbuspipe_hdr)];
|
||||
|
||||
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
|
||||
vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf,
|
||||
UTIL_FW_MAJOR_MINOR,
|
||||
TIMESYNCH_MAJOR_MINOR);
|
||||
vmbus_prep_negotiate_resp(icmsghdrp, negop,
|
||||
time_txf_buf,
|
||||
util_fw_version,
|
||||
ts_srv_version);
|
||||
} else {
|
||||
timedatap = (struct ictimesync_data *)&time_txf_buf[
|
||||
sizeof(struct vmbuspipe_hdr) +
|
||||
|
@ -257,6 +274,7 @@ static void heartbeat_onchannelcallback(void *context)
|
|||
struct icmsg_hdr *icmsghdrp;
|
||||
struct heartbeat_msg_data *heartbeat_msg;
|
||||
u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
|
||||
struct icmsg_negotiate *negop = NULL;
|
||||
|
||||
vmbus_recvpacket(channel, hbeat_txf_buf,
|
||||
PAGE_SIZE, &recvlen, &requestid);
|
||||
|
@ -266,9 +284,9 @@ static void heartbeat_onchannelcallback(void *context)
|
|||
sizeof(struct vmbuspipe_hdr)];
|
||||
|
||||
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
|
||||
vmbus_prep_negotiate_resp(icmsghdrp, NULL,
|
||||
hbeat_txf_buf, UTIL_FW_MAJOR_MINOR,
|
||||
HEARTBEAT_MAJOR_MINOR);
|
||||
vmbus_prep_negotiate_resp(icmsghdrp, negop,
|
||||
hbeat_txf_buf, util_fw_version,
|
||||
hb_srv_version);
|
||||
} else {
|
||||
heartbeat_msg =
|
||||
(struct heartbeat_msg_data *)&hbeat_txf_buf[
|
||||
|
@ -321,6 +339,25 @@ static int util_probe(struct hv_device *dev,
|
|||
goto error;
|
||||
|
||||
hv_set_drvdata(dev, srv);
|
||||
/*
|
||||
* Based on the host; initialize the framework and
|
||||
* service version numbers we will negotiate.
|
||||
*/
|
||||
switch (vmbus_proto_version) {
|
||||
case (VERSION_WS2008):
|
||||
util_fw_version = UTIL_WS2K8_FW_VERSION;
|
||||
sd_srv_version = SD_WS2008_VERSION;
|
||||
ts_srv_version = TS_WS2008_VERSION;
|
||||
hb_srv_version = HB_WS2008_VERSION;
|
||||
break;
|
||||
|
||||
default:
|
||||
util_fw_version = UTIL_FW_VERSION;
|
||||
sd_srv_version = SD_VERSION;
|
||||
ts_srv_version = TS_VERSION;
|
||||
hb_srv_version = HB_VERSION;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
|
|
|
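Throughout the hv_kvp, hv_vss and hv_util changes above, a framework or service version is a single 32-bit value with the major number in the upper 16 bits and the minor in the lower 16, and the WS2008-era numbers are chosen when the negotiated VMBus protocol is old. A tiny sketch of packing and unpacking that encoding; SRV_VERSION and print_version are invented helper names:

#include <stdint.h>
#include <stdio.h>

#define SRV_VERSION(major, minor)  (((uint32_t)(major) << 16) | (uint16_t)(minor))

static void print_version(uint32_t v)
{
	printf("%u.%u\n", (unsigned)(v >> 16), (unsigned)(v & 0xffff));
}

int main(void)
{
	uint32_t win8_kvp   = SRV_VERSION(4, 0);  /* WIN8_SRV_MAJOR/MINOR above */
	uint32_t ws2008_kvp = SRV_VERSION(1, 0);  /* WS2008_SRV_MAJOR/MINOR above */

	print_version(win8_kvp);     /* prints 4.0 */
	print_version(ws2008_kvp);   /* prints 1.0 */
	return 0;
}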
@ -525,16 +525,25 @@ static int applesmc_init_smcreg_try(void)
|
|||
{
|
||||
struct applesmc_registers *s = &smcreg;
|
||||
bool left_light_sensor, right_light_sensor;
|
||||
unsigned int count;
|
||||
u8 tmp[1];
|
||||
int ret;
|
||||
|
||||
if (s->init_complete)
|
||||
return 0;
|
||||
|
||||
ret = read_register_count(&s->key_count);
|
||||
ret = read_register_count(&count);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (s->cache && s->key_count != count) {
|
||||
pr_warn("key count changed from %d to %d\n",
|
||||
s->key_count, count);
|
||||
kfree(s->cache);
|
||||
s->cache = NULL;
|
||||
}
|
||||
s->key_count = count;
|
||||
|
||||
if (!s->cache)
|
||||
s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
|
||||
if (!s->cache)
|
||||
|
|
|
@ -98,6 +98,8 @@
|
|||
|
||||
#define DW_IC_ERR_TX_ABRT 0x1
|
||||
|
||||
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
|
||||
|
||||
/*
|
||||
* status codes
|
||||
*/
|
||||
|
@ -388,22 +390,34 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
|
|||
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
|
||||
{
|
||||
struct i2c_msg *msgs = dev->msgs;
|
||||
u32 ic_con;
|
||||
u32 ic_con, ic_tar = 0;
|
||||
|
||||
/* Disable the adapter */
|
||||
__i2c_dw_enable(dev, false);
|
||||
|
||||
/* set the slave (target) address */
|
||||
dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR);
|
||||
|
||||
/* if the slave address is ten bit address, enable 10BITADDR */
|
||||
ic_con = dw_readl(dev, DW_IC_CON);
|
||||
if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
|
||||
if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
|
||||
ic_con |= DW_IC_CON_10BITADDR_MASTER;
|
||||
else
|
||||
/*
|
||||
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
|
||||
* mode has to be enabled via bit 12 of IC_TAR register.
|
||||
* We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
|
||||
* detected from registers.
|
||||
*/
|
||||
ic_tar = DW_IC_TAR_10BITADDR_MASTER;
|
||||
} else {
|
||||
ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
|
||||
}
|
||||
|
||||
dw_writel(dev, ic_con, DW_IC_CON);
|
||||
|
||||
/*
|
||||
* Set the slave (target) address and enable 10-bit addressing mode
|
||||
* if applicable.
|
||||
*/
|
||||
dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
|
||||
|
||||
/* Enable the adapter */
|
||||
__i2c_dw_enable(dev, true);
|
||||
|
||||
|
|
|
@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
|
|||
|
||||
desc = &priv->hw[priv->head];
|
||||
|
||||
/* Initialize the DMA buffer */
|
||||
memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
|
||||
|
||||
/* Initialize the descriptor */
|
||||
memset(desc, 0, sizeof(struct ismt_desc));
|
||||
desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
|
||||
|
|
|
@ -234,9 +234,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
|
|||
ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
|
||||
(msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
|
||||
|
||||
writel_relaxed(data_reg_lo,
|
||||
writel(data_reg_lo,
|
||||
drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
|
||||
writel_relaxed(data_reg_hi,
|
||||
writel(data_reg_hi,
|
||||
drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
|
||||
|
||||
} else {
|
||||
|
@ -697,6 +697,7 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
|
|||
MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
#ifdef CONFIG_HAVE_CLK
|
||||
static int
|
||||
mv64xxx_calc_freq(const int tclk, const int n, const int m)
|
||||
{
|
||||
|
@ -726,16 +727,12 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
|
|||
return false;
|
||||
return true;
|
||||
}
|
||||
#endif /* CONFIG_HAVE_CLK */
|
||||
|
||||
static int
|
||||
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
|
||||
struct device *dev)
|
||||
{
|
||||
const struct of_device_id *device;
|
||||
struct device_node *np = dev->of_node;
|
||||
u32 bus_freq, tclk;
|
||||
int rc = 0;
|
||||
|
||||
/* CLK is mandatory when using DT to describe the i2c bus. We
|
||||
* need to know tclk in order to calculate bus clock
|
||||
* factors.
|
||||
|
@ -744,6 +741,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
|
|||
/* Have OF but no CLK */
|
||||
return -ENODEV;
|
||||
#else
|
||||
const struct of_device_id *device;
|
||||
struct device_node *np = dev->of_node;
|
||||
u32 bus_freq, tclk;
|
||||
int rc = 0;
|
||||
|
||||
if (IS_ERR(drv_data->clk)) {
|
||||
rc = -ENODEV;
|
||||
goto out;
|
||||
|
|
|
@ -1178,8 +1178,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
|
|||
|
||||
i2c_del_adapter(&i2c->adap);
|
||||
|
||||
clk_disable_unprepare(i2c->clk);
|
||||
|
||||
if (pdev->dev.of_node && IS_ERR(i2c->pctrl))
|
||||
s3c24xx_i2c_dt_gpio_free(i2c);
|
||||
|
||||
|
|
|
@ -498,7 +498,7 @@ struct cached_dev {
|
|||
*/
|
||||
atomic_t has_dirty;
|
||||
|
||||
struct ratelimit writeback_rate;
|
||||
struct bch_ratelimit writeback_rate;
|
||||
struct delayed_work writeback_rate_update;
|
||||
|
||||
/*
|
||||
|
@ -507,10 +507,9 @@ struct cached_dev {
|
|||
*/
|
||||
sector_t last_read;
|
||||
|
||||
/* Number of writeback bios in flight */
|
||||
atomic_t in_flight;
|
||||
/* Limit number of writeback bios in flight */
|
||||
struct semaphore in_flight;
|
||||
struct closure_with_timer writeback;
|
||||
struct closure_waitlist writeback_wait;
|
||||
|
||||
struct keybuf writeback_keys;
|
||||
|
||||
|
|
|
@ -926,28 +926,45 @@ struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
|
|||
|
||||
/* Mergesort */
|
||||
|
||||
static void sort_key_next(struct btree_iter *iter,
|
||||
struct btree_iter_set *i)
|
||||
{
|
||||
i->k = bkey_next(i->k);
|
||||
|
||||
if (i->k == i->end)
|
||||
*i = iter->data[--iter->used];
|
||||
}
|
||||
|
||||
static void btree_sort_fixup(struct btree_iter *iter)
|
||||
{
|
||||
while (iter->used > 1) {
|
||||
struct btree_iter_set *top = iter->data, *i = top + 1;
|
||||
struct bkey *k;
|
||||
|
||||
if (iter->used > 2 &&
|
||||
btree_iter_cmp(i[0], i[1]))
|
||||
i++;
|
||||
|
||||
for (k = i->k;
|
||||
k != i->end && bkey_cmp(top->k, &START_KEY(k)) > 0;
|
||||
k = bkey_next(k))
|
||||
if (top->k > i->k)
|
||||
__bch_cut_front(top->k, k);
|
||||
else if (KEY_SIZE(k))
|
||||
bch_cut_back(&START_KEY(k), top->k);
|
||||
|
||||
if (top->k < i->k || k == i->k)
|
||||
if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
|
||||
break;
|
||||
|
||||
heap_sift(iter, i - top, btree_iter_cmp);
|
||||
if (!KEY_SIZE(i->k)) {
|
||||
sort_key_next(iter, i);
|
||||
heap_sift(iter, i - top, btree_iter_cmp);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (top->k > i->k) {
|
||||
if (bkey_cmp(top->k, i->k) >= 0)
|
||||
sort_key_next(iter, i);
|
||||
else
|
||||
bch_cut_front(top->k, i->k);
|
||||
|
||||
heap_sift(iter, i - top, btree_iter_cmp);
|
||||
} else {
|
||||
/* can't happen because of comparison func */
|
||||
BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
|
||||
bch_cut_back(&START_KEY(i->k), top->k);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -255,7 +255,7 @@ void bch_btree_node_read(struct btree *b)
|
|||
|
||||
return;
|
||||
err:
|
||||
bch_cache_set_error(b->c, "io error reading bucket %lu",
|
||||
bch_cache_set_error(b->c, "io error reading bucket %zu",
|
||||
PTR_BUCKET_NR(b->c, &b->key, 0));
|
||||
}
|
||||
|
||||
|
@ -612,7 +612,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
|
|||
return SHRINK_STOP;
|
||||
|
||||
/* Return -1 if we can't do anything right now */
|
||||
if (sc->gfp_mask & __GFP_WAIT)
|
||||
if (sc->gfp_mask & __GFP_IO)
|
||||
mutex_lock(&c->bucket_lock);
|
||||
else if (!mutex_trylock(&c->bucket_lock))
|
||||
return -1;
|
||||
|
|
|
@ -153,7 +153,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
|
|||
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
|
||||
pr_debug("%u journal buckets", ca->sb.njournal_buckets);
|
||||
|
||||
/* Read journal buckets ordered by golden ratio hash to quickly
|
||||
/*
|
||||
* Read journal buckets ordered by golden ratio hash to quickly
|
||||
* find a sequence of buckets with valid journal entries
|
||||
*/
|
||||
for (i = 0; i < ca->sb.njournal_buckets; i++) {
|
||||
|
@ -166,18 +167,20 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
|
|||
goto bsearch;
|
||||
}
|
||||
|
||||
/* If that fails, check all the buckets we haven't checked
|
||||
/*
|
||||
* If that fails, check all the buckets we haven't checked
|
||||
* already
|
||||
*/
|
||||
pr_debug("falling back to linear search");
|
||||
|
||||
for (l = 0; l < ca->sb.njournal_buckets; l++) {
|
||||
if (test_bit(l, bitmap))
|
||||
continue;
|
||||
|
||||
for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
|
||||
l < ca->sb.njournal_buckets;
|
||||
l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
|
||||
if (read_bucket(l))
|
||||
goto bsearch;
|
||||
}
|
||||
|
||||
if (list_empty(list))
|
||||
continue;
|
||||
bsearch:
|
||||
/* Binary search */
|
||||
m = r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
|
||||
|
@ -197,10 +200,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
|
|||
r = m;
|
||||
}
|
||||
|
||||
/* Read buckets in reverse order until we stop finding more
|
||||
/*
|
||||
* Read buckets in reverse order until we stop finding more
|
||||
* journal entries
|
||||
*/
|
||||
pr_debug("finishing up");
|
||||
pr_debug("finishing up: m %u njournal_buckets %u",
|
||||
m, ca->sb.njournal_buckets);
|
||||
l = m;
|
||||
|
||||
while (1) {
|
||||
|
@ -228,9 +233,10 @@ int bch_journal_read(struct cache_set *c, struct list_head *list,
|
|||
}
|
||||
}
|
||||
|
||||
c->journal.seq = list_entry(list->prev,
|
||||
struct journal_replay,
|
||||
list)->j.seq;
|
||||
if (!list_empty(list))
|
||||
c->journal.seq = list_entry(list->prev,
|
||||
struct journal_replay,
|
||||
list)->j.seq;
|
||||
|
||||
return 0;
|
||||
#undef read_bucket
|
||||
|
@ -428,7 +434,7 @@ static void do_journal_discard(struct cache *ca)
|
|||
return;
|
||||
}
|
||||
|
||||
switch (atomic_read(&ja->discard_in_flight) == DISCARD_IN_FLIGHT) {
|
||||
switch (atomic_read(&ja->discard_in_flight)) {
|
||||
case DISCARD_IN_FLIGHT:
|
||||
return;
|
||||
|
||||
|
@ -689,6 +695,7 @@ void bch_journal_meta(struct cache_set *c, struct closure *cl)
|
|||
if (cl)
|
||||
BUG_ON(!closure_wait(&w->wait, cl));
|
||||
|
||||
closure_flush(&c->journal.io);
|
||||
__journal_try_write(c, true);
|
||||
}
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff.