Merge branch 'for-rmk' of git://git.marvell.com/orion into devel-stable

commit 2a2d10f386
Author: Russell King
Date:   2010-02-25 20:41:34 +00:00

364 changed files with 4254 additions and 1959 deletions

@@ -20,7 +20,7 @@ Description:
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]
base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
fsmagic:= hex value
uid:= decimal value
@@ -40,11 +40,11 @@ Description:
measure func=BPRM_CHECK
measure func=FILE_MMAP mask=MAY_EXEC
measure func=INODE_PERM mask=MAY_READ uid=0
measure func=FILE_CHECK mask=MAY_READ uid=0
The default policy measures all executables in bprm_check,
all files mmapped executable in file_mmap, and all files
open for read by root in inode_permission.
open for read by root in do_filp_open.
Examples of LSM specific definitions:
@@ -54,8 +54,8 @@ Description:
dont_measure obj_type=var_log_t
dont_measure obj_type=auditd_log_t
measure subj_user=system_u func=INODE_PERM mask=MAY_READ
measure subj_role=system_r func=INODE_PERM mask=MAY_READ
measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
Smack:
measure subj_user=_ func=INODE_PERM mask=MAY_READ
measure subj_user=_ func=FILE_CHECK mask=MAY_READ
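As an illustration (a sketch, assuming securityfs is mounted at the
conventional /sys/kernel/security), a policy using the renamed
FILE_CHECK hook would be loaded by writing the rules into the
ima/policy file:

cat my-policy > /sys/kernel/security/ima/policy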

@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
up_threshold: defines what the average CPU usage between the samplings
of 'sampling_rate' needs to be for the kernel to make a decision on
whether it should increase the frequency. For example when it is set
to its default value of '80' it means that between the checking
intervals the CPU needs to be on average more than 80% in use to then
to its default value of '95' it means that between the checking
intervals the CPU needs to be on average more than 95% in use to then
decide that the CPU frequency needs to be increased.
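For illustration (a sketch, assuming the ondemand governor's tunables
are exposed at their usual sysfs location), the threshold can be read
or lowered at run time so the frequency ramps up earlier:

cat /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold
echo 80 > /sys/devices/system/cpu/cpu0/cpufreq/ondemand/up_threshold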
ignore_nice_load: this parameter takes a value of '0' or '1'. When

@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
acpi_display_output=video
See above.
acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods
early. Needed on some platforms to properly
initialize the EC.
acpi_irq_balance [HW,ACPI]
ACPI will balance active IRQs
default in APIC mode

@@ -616,10 +616,10 @@ M: Richard Purdie <rpurdie@rpsys.net>
S: Maintained
ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://gitorious.org/linux-gemini/mainline.git
S: Maintained
S: Odd Fixes
F: arch/arm/mach-gemini/
ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T: topgit git://git.openezx.org/openezx.git
F: arch/arm/mach-pxa/ezx.c
ARM/FARADAY FA526 PORT
M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
S: Odd Fixes
F: arch/arm/mm/*-fa*
ARM/FOOTBRIDGE ARCHITECTURE
@@ -3411,8 +3411,10 @@ S: Maintained
F: drivers/scsi/sym53c8xx_2/
LTP (Linux Test Project)
M: Subrata Modak <subrata@linux.vnet.ibm.com>
M: Mike Frysinger <vapier@gentoo.org>
M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
M: Garrett Cooper <yanegomi@gmail.com>
M: Mike Frysinger <vapier@gentoo.org>
M: Subrata Modak <subrata@linux.vnet.ibm.com>
L: ltp-list@lists.sourceforge.net (subscribers-only)
W: http://ltp.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3836,6 +3838,7 @@ NETWORKING DRIVERS
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
S: Odd Fixes
F: drivers/net/
F: include/linux/if_*

@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 33
EXTRAVERSION = -rc6
EXTRAVERSION = -rc8
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*

@@ -176,6 +176,7 @@ CONFIG_ARCH_MV78XX0=y
#
CONFIG_MACH_DB78X00_BP=y
CONFIG_MACH_RD78X00_MASA=y
CONFIG_MACH_TERASTATION_WXL=y
CONFIG_PLAT_ORION=y
#

@@ -42,7 +42,8 @@
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif
struct stack {

@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
unsigned int reg_both, reg_level, reg_type;
reg_type = __raw_readl(base + GPIO_INT_TYPE);
reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
reg_level = __raw_readl(base + GPIO_INT_LEVEL);
reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
}
__raw_writel(reg_type, base + GPIO_INT_TYPE);
__raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
__raw_writel(reg_level, base + GPIO_INT_LEVEL);
__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
gpio_ack_irq(irq);

@@ -32,6 +32,12 @@ config MACH_SHEEVAPLUG
Say 'Y' here if you want your kernel to support the
Marvell SheevaPlug Reference Board.
config MACH_ESATA_SHEEVAPLUG
bool "Marvell eSATA SheevaPlug Reference Board"
help
Say 'Y' here if you want your kernel to support the
Marvell eSATA SheevaPlug Reference Board.
config MACH_TS219
bool "QNAP TS-110, TS-119, TS-210, TS-219 and TS-219P Turbo NAS"
help

@@ -5,6 +5,7 @@ obj-$(CONFIG_MACH_RD88F6192_NAS) += rd88f6192-nas-setup.o
obj-$(CONFIG_MACH_RD88F6281) += rd88f6281-setup.o
obj-$(CONFIG_MACH_MV88F6281GTW_GE) += mv88f6281gtw_ge-setup.o
obj-$(CONFIG_MACH_SHEEVAPLUG) += sheevaplug-setup.o
obj-$(CONFIG_MACH_ESATA_SHEEVAPLUG) += sheevaplug-setup.o
obj-$(CONFIG_MACH_TS219) += ts219-setup.o tsx1x-common.o
obj-$(CONFIG_MACH_TS41X) += ts41x-setup.o tsx1x-common.o
obj-$(CONFIG_MACH_OPENRD) += openrd-setup.o

@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mtd/partitions.h>
#include <linux/mv643xx_eth.h>
#include <linux/gpio.h>
@@ -42,10 +42,19 @@ static struct mv643xx_eth_platform_data sheevaplug_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
static struct mv_sata_platform_data sheeva_esata_sata_data = {
.n_ports = 2,
};
static struct mvsdio_platform_data sheevaplug_mvsdio_data = {
/* unfortunately the CD signal has not been connected */
};
static struct mvsdio_platform_data sheeva_esata_mvsdio_data = {
.gpio_write_protect = 44, /* MPP44 used as SD write protect */
.gpio_card_detect = 47, /* MPP47 used as SD card detect */
};
static struct gpio_led sheevaplug_led_pins[] = {
{
.name = "plug:green:health",
@@ -74,13 +84,26 @@ static unsigned int sheevaplug_mpp_config[] __initdata = {
0
};
static unsigned int sheeva_esata_mpp_config[] __initdata = {
MPP29_GPIO, /* USB Power Enable */
MPP44_GPIO, /* SD Write Protect */
MPP47_GPIO, /* SD Card Detect */
MPP49_GPIO, /* LED Green */
0
};
static void __init sheevaplug_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
kirkwood_init();
kirkwood_mpp_conf(sheevaplug_mpp_config);
/* setup gpio pin select */
if (machine_is_sheeva_esata())
kirkwood_mpp_conf(sheeva_esata_mpp_config);
else
kirkwood_mpp_conf(sheevaplug_mpp_config);
kirkwood_uart0_init();
kirkwood_nand_init(ARRAY_AND_SIZE(sheevaplug_nand_parts), 25);
@@ -91,11 +114,21 @@ static void __init sheevaplug_init(void)
kirkwood_ehci_init();
kirkwood_ge00_init(&sheevaplug_ge00_data);
kirkwood_sdio_init(&sheevaplug_mvsdio_data);
/* honor lower power consumption for plugs without eSATA */
if (machine_is_sheeva_esata())
kirkwood_sata_init(&sheeva_esata_sata_data);
/* enable sd wp and sd cd on plugs with esata */
if (machine_is_sheeva_esata())
kirkwood_sdio_init(&sheeva_esata_mvsdio_data);
else
kirkwood_sdio_init(&sheevaplug_mvsdio_data);
platform_device_register(&sheevaplug_leds);
}
#ifdef CONFIG_MACH_SHEEVAPLUG
MACHINE_START(SHEEVAPLUG, "Marvell SheevaPlug Reference Board")
/* Maintainer: shadi Ammouri <shadi@marvell.com> */
.phys_io = KIRKWOOD_REGS_PHYS_BASE,
@@ -106,3 +139,16 @@ MACHINE_START(SHEEVAPLUG, "Marvell SheevaPlug Reference Board")
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
MACHINE_END
#endif
#ifdef CONFIG_MACH_ESATA_SHEEVAPLUG
MACHINE_START(ESATA_SHEEVAPLUG, "Marvell eSATA SheevaPlug Reference Board")
.phys_io = KIRKWOOD_REGS_PHYS_BASE,
.io_pg_offst = ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
.boot_params = 0x00000100,
.init_machine = sheevaplug_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
MACHINE_END
#endif

@@ -14,6 +14,12 @@ config MACH_RD78X00_MASA
Say 'Y' here if you want your kernel to support the
Marvell RD-78x00-mASA Reference Design.
config MACH_TERASTATION_WXL
bool "Buffalo WLX (Terastation Duo) NAS"
help
Say 'Y' here if you want your kernel to support the
Buffalo WXL NAS.
endmenu
endif

@@ -1,3 +1,4 @@
obj-y += common.o addr-map.o irq.o pcie.o
obj-y += common.o addr-map.o mpp.o irq.o pcie.o
obj-$(CONFIG_MACH_DB78X00_BP) += db78x00-bp-setup.o
obj-$(CONFIG_MACH_RD78X00_MASA) += rd78x00-masa-setup.o
obj-$(CONFIG_MACH_TERASTATION_WXL) += buffalo-wxl-setup.o

@@ -0,0 +1,155 @@
/*
* arch/arm/mach-mv78xx0/buffalo-wxl-setup.c
*
* Buffalo WXL (Terastation Duo) Setup routines
*
* sebastien requiem <sebastien@requiem.fr>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/ethtool.h>
#include <linux/i2c.h>
#include <mach/mv78xx0.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include "common.h"
#include "mpp.h"
/* This arch has 2 Giga Ethernet */
static struct mv643xx_eth_platform_data db78x00_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(0),
};
static struct mv643xx_eth_platform_data db78x00_ge01_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
/* 2 SATA controller supporting HotPlug */
static struct mv_sata_platform_data db78x00_sata_data = {
.n_ports = 2,
};
static struct i2c_board_info __initdata db78x00_i2c_rtc = {
I2C_BOARD_INFO("ds1338", 0x68),
};
static unsigned int wxl_mpp_config[] __initdata = {
MPP0_GE1_TXCLK,
MPP1_GE1_TXCTL,
MPP2_GE1_RXCTL,
MPP3_GE1_RXCLK,
MPP4_GE1_TXD0,
MPP5_GE1_TXD1,
MPP6_GE1_TXD2,
MPP7_GE1_TXD3,
MPP8_GE1_RXD0,
MPP9_GE1_RXD1,
MPP10_GE1_RXD2,
MPP11_GE1_RXD3,
MPP12_GPIO,
MPP13_SYSRST_OUTn,
MPP14_SATA1_ACTn,
MPP15_SATA0_ACTn,
MPP16_GPIO,
MPP17_GPIO,
MPP18_GPIO,
MPP19_GPIO,
MPP20_GPIO,
MPP21_GPIO,
MPP22_GPIO,
MPP23_GPIO,
MPP24_UA2_TXD,
MPP25_UA2_RXD,
MPP26_UA2_CTSn,
MPP27_UA2_RTSn,
MPP28_GPIO,
MPP29_SYSRST_OUTn,
MPP30_GPIO,
MPP31_GPIO,
MPP32_GPIO,
MPP33_GPIO,
MPP34_GPIO,
MPP35_GPIO,
MPP36_GPIO,
MPP37_GPIO,
MPP38_GPIO,
MPP39_GPIO,
MPP40_UNUSED,
MPP41_UNUSED,
MPP42_UNUSED,
MPP43_UNUSED,
MPP44_UNUSED,
MPP45_UNUSED,
MPP46_UNUSED,
MPP47_UNUSED,
MPP48_SATA1_ACTn,
MPP49_SATA0_ACTn,
0
};
static void __init wxl_init(void)
{
/*
* Basic MV78xx0 setup. Needs to be called early.
*/
mv78xx0_init();
mv78xx0_mpp_conf(wxl_mpp_config);
/*
* Partition on-chip peripherals between the two CPU cores.
*/
mv78xx0_ehci0_init();
mv78xx0_ehci1_init();
mv78xx0_ehci2_init();
mv78xx0_ge00_init(&db78x00_ge00_data);
mv78xx0_ge01_init(&db78x00_ge01_data);
mv78xx0_sata_init(&db78x00_sata_data);
mv78xx0_uart0_init();
mv78xx0_uart1_init();
mv78xx0_uart2_init();
mv78xx0_uart3_init();
mv78xx0_i2c_init();
i2c_register_board_info(0, &db78x00_i2c_rtc, 1);
}
static int __init wxl_pci_init(void)
{
if (machine_is_terastation_wxl()) {
/*
* Assign the x16 PCIe slot on the board to CPU core
* #0, and let CPU core #1 have the four x1 slots.
*/
if (mv78xx0_core_index() == 0)
mv78xx0_pcie_init(0, 1);
else
mv78xx0_pcie_init(1, 0);
}
return 0;
}
subsys_initcall(wxl_pci_init);
MACHINE_START(TERASTATION_WXL, "Buffalo Nas WXL")
/* Maintainer: Sebastien Requiem <sebastien@requiem.fr> */
.phys_io = MV78XX0_REGS_PHYS_BASE,
.io_pg_offst = ((MV78XX0_REGS_VIRT_BASE) >> 18) & 0xfffc,
.boot_params = 0x00000100,
.init_machine = wxl_init,
.map_io = mv78xx0_map_io,
.init_irq = mv78xx0_init_irq,
.timer = &mv78xx0_timer,
MACHINE_END

@@ -0,0 +1,96 @@
/*
* arch/arm/mach-mv78x00/mpp.c
*
* MPP functions for Marvell MV78x00 SoCs
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/io.h>
#include <asm/gpio.h>
#include <mach/hardware.h>
#include "common.h"
#include "mpp.h"
static unsigned int __init mv78xx0_variant(void)
{
u32 dev, rev;
mv78xx0_pcie_id(&dev, &rev);
if (dev == MV78100_DEV_ID && rev >= MV78100_REV_A0)
return MPP_78100_A0_MASK;
printk(KERN_ERR "MPP setup: unknown mv78x00 variant "
"(dev %#x rev %#x)\n", dev, rev);
return 0;
}
#define MPP_CTRL(i) (DEV_BUS_VIRT_BASE + (i) * 4)
#define MPP_NR_REGS (1 + MPP_MAX/8)
void __init mv78xx0_mpp_conf(unsigned int *mpp_list)
{
u32 mpp_ctrl[MPP_NR_REGS];
unsigned int variant_mask;
int i;
variant_mask = mv78xx0_variant();
if (!variant_mask)
return;
/* Initialize gpiolib. */
orion_gpio_init();
printk(KERN_DEBUG "initial MPP regs:");
for (i = 0; i < MPP_NR_REGS; i++) {
mpp_ctrl[i] = readl(MPP_CTRL(i));
printk(" %08x", mpp_ctrl[i]);
}
printk("\n");
while (*mpp_list) {
unsigned int num = MPP_NUM(*mpp_list);
unsigned int sel = MPP_SEL(*mpp_list);
int shift, gpio_mode;
if (num > MPP_MAX) {
printk(KERN_ERR "mv78xx0_mpp_conf: invalid MPP "
"number (%u)\n", num);
continue;
}
if (!(*mpp_list & variant_mask)) {
printk(KERN_WARNING
"mv78xx0_mpp_conf: requested MPP%u config "
"unavailable on this hardware\n", num);
continue;
}
shift = (num & 7) << 2;
mpp_ctrl[num / 8] &= ~(0xf << shift);
mpp_ctrl[num / 8] |= sel << shift;
gpio_mode = 0;
if (*mpp_list & MPP_INPUT_MASK)
gpio_mode |= GPIO_INPUT_OK;
if (*mpp_list & MPP_OUTPUT_MASK)
gpio_mode |= GPIO_OUTPUT_OK;
if (sel != 0)
gpio_mode = 0;
orion_gpio_set_valid(num, gpio_mode);
mpp_list++;
}
printk(KERN_DEBUG " final MPP regs:");
for (i = 0; i < MPP_NR_REGS; i++) {
writel(mpp_ctrl[i], MPP_CTRL(i));
printk(" %08x", mpp_ctrl[i]);
}
printk("\n");
}

arch/arm/mach-mv78xx0/mpp.h (new file, 347 lines)

@@ -0,0 +1,347 @@
/*
* linux/arch/arm/mach-mv78xx0/mpp.h -- Multi Purpose Pins
*
*
* sebastien requiem <sebastien@requiem.fr>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#ifndef __MV78X00_MPP_H
#define __MV78X00_MPP_H
#define MPP(_num, _sel, _in, _out, _78100_A0) (\
/* MPP number */ ((_num) & 0xff) | \
/* MPP select value */ (((_sel) & 0xf) << 8) | \
/* may be input signal */ ((!!(_in)) << 12) | \
/* may be output signal */ ((!!(_out)) << 13) | \
/* available on A0 */ ((!!(_78100_A0)) << 14))
#define MPP_NUM(x) ((x) & 0xff)
#define MPP_SEL(x) (((x) >> 8) & 0xf)
/* num sel i o 78100_A0 */
#define MPP_INPUT_MASK MPP(0, 0x0, 1, 0, 0)
#define MPP_OUTPUT_MASK MPP(0, 0x0, 0, 1, 0)
#define MPP_78100_A0_MASK MPP(0, 0x0, 0, 0, 1)
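/*
 * Worked example, for illustration only (not part of the original
 * header): MPP0_GE1_TXCLK below is MPP(0, 0x2, 0, 1, 1), which packs
 * to (0 & 0xff) | (0x2 << 8) | (1 << 13) | (1 << 14) = 0x6200, so
 * MPP_NUM() recovers pin number 0, MPP_SEL() recovers select value
 * 0x2, and bit 14 is exactly what MPP_78100_A0_MASK above tests.
 */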
#define MPP0_GPIO MPP(0, 0x0, 1, 1, 1)
#define MPP0_GE0_COL MPP(0, 0x1, 1, 0, 1)
#define MPP0_GE1_TXCLK MPP(0, 0x2, 0, 1, 1)
#define MPP0_UNUSED MPP(0, 0x3, 0, 0, 1)
#define MPP1_GPIO MPP(1, 0x0, 1, 1, 1)
#define MPP1_GE0_RXERR MPP(1, 0x1, 1, 0, 1)
#define MPP1_GE1_TXCTL MPP(1, 0x2, 0, 1, 1)
#define MPP1_UNUSED MPP(1, 0x3, 0, 0, 1)
#define MPP2_GPIO MPP(2, 0x0, 1, 1, 1)
#define MPP2_GE0_CRS MPP(2, 0x1, 1, 0, 1)
#define MPP2_GE1_RXCTL MPP(2, 0x2, 1, 0, 1)
#define MPP2_UNUSED MPP(2, 0x3, 0, 0, 1)
#define MPP3_GPIO MPP(3, 0x0, 1, 1, 1)
#define MPP3_GE0_TXERR MPP(3, 0x1, 0, 1, 1)
#define MPP3_GE1_RXCLK MPP(3, 0x2, 1, 0, 1)
#define MPP3_UNUSED MPP(3, 0x3, 0, 0, 1)
#define MPP4_GPIO MPP(4, 0x0, 1, 1, 1)
#define MPP4_GE0_TXD4 MPP(4, 0x1, 0, 1, 1)
#define MPP4_GE1_TXD0 MPP(4, 0x2, 0, 1, 1)
#define MPP4_UNUSED MPP(4, 0x3, 0, 0, 1)
#define MPP5_GPIO MPP(5, 0x0, 1, 1, 1)
#define MPP5_GE0_TXD5 MPP(5, 0x1, 0, 1, 1)
#define MPP5_GE1_TXD1 MPP(5, 0x2, 0, 1, 1)
#define MPP5_UNUSED MPP(5, 0x3, 0, 0, 1)
#define MPP6_GPIO MPP(6, 0x0, 1, 1, 1)
#define MPP6_GE0_TXD6 MPP(6, 0x1, 0, 1, 1)
#define MPP6_GE1_TXD2 MPP(6, 0x2, 0, 1, 1)
#define MPP6_UNUSED MPP(6, 0x3, 0, 0, 1)
#define MPP7_GPIO MPP(7, 0x0, 1, 1, 1)
#define MPP7_GE0_TXD7 MPP(7, 0x1, 0, 1, 1)
#define MPP7_GE1_TXD3 MPP(7, 0x2, 0, 1, 1)
#define MPP7_UNUSED MPP(7, 0x3, 0, 0, 1)
#define MPP8_GPIO MPP(8, 0x0, 1, 1, 1)
#define MPP8_GE0_RXD4 MPP(8, 0x1, 1, 0, 1)
#define MPP8_GE1_RXD0 MPP(8, 0x2, 1, 0, 1)
#define MPP8_UNUSED MPP(8, 0x3, 0, 0, 1)
#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1)
#define MPP9_GE0_RXD5 MPP(9, 0x1, 1, 0, 1)
#define MPP9_GE1_RXD1 MPP(9, 0x2, 1, 0, 1)
#define MPP9_UNUSED MPP(9, 0x3, 0, 0, 1)
#define MPP10_GPIO MPP(10, 0x0, 1, 1, 1)
#define MPP10_GE0_RXD6 MPP(10, 0x1, 1, 0, 1)
#define MPP10_GE1_RXD2 MPP(10, 0x2, 1, 0, 1)
#define MPP10_UNUSED MPP(10, 0x3, 0, 0, 1)
#define MPP11_GPIO MPP(11, 0x0, 1, 1, 1)
#define MPP11_GE0_RXD7 MPP(11, 0x1, 1, 0, 1)
#define MPP11_GE1_RXD3 MPP(11, 0x2, 1, 0, 1)
#define MPP11_UNUSED MPP(11, 0x3, 0, 0, 1)
#define MPP12_GPIO MPP(12, 0x0, 1, 1, 1)
#define MPP12_M_BB MPP(12, 0x3, 1, 0, 1)
#define MPP12_UA0_CTSn MPP(12, 0x4, 1, 0, 1)
#define MPP12_NAND_FLASH_REn0 MPP(12, 0x5, 0, 1, 1)
#define MPP12_TDM0_SCSn MPP(12, 0X6, 0, 1, 1)
#define MPP12_UNUSED MPP(12, 0x1, 0, 0, 1)
#define MPP13_GPIO MPP(13, 0x0, 1, 1, 1)
#define MPP13_SYSRST_OUTn MPP(13, 0x3, 0, 1, 1)
#define MPP13_UA0_RTSn MPP(13, 0x4, 0, 1, 1)
#define MPP13_NAN_FLASH_WEn0 MPP(13, 0x5, 0, 1, 1)
#define MPP13_TDM_SCLK MPP(13, 0x6, 0, 1, 1)
#define MPP13_UNUSED MPP(13, 0x1, 0, 0, 1)
#define MPP14_GPIO MPP(14, 0x0, 1, 1, 1)
#define MPP14_SATA1_ACTn MPP(14, 0x3, 0, 1, 1)
#define MPP14_UA1_CTSn MPP(14, 0x4, 1, 0, 1)
#define MPP14_NAND_FLASH_REn1 MPP(14, 0x5, 0, 1, 1)
#define MPP14_TDM_SMOSI MPP(14, 0x6, 0, 1, 1)
#define MPP14_UNUSED MPP(14, 0x1, 0, 0, 1)
#define MPP15_GPIO MPP(15, 0x0, 1, 1, 1)
#define MPP15_SATA0_ACTn MPP(15, 0x3, 0, 1, 1)
#define MPP15_UA1_RTSn MPP(15, 0x4, 0, 1, 1)
#define MPP15_NAND_FLASH_WEn1 MPP(15, 0x5, 0, 1, 1)
#define MPP15_TDM_SMISO MPP(15, 0x6, 1, 0, 1)
#define MPP15_UNUSED MPP(15, 0x1, 0, 0, 1)
#define MPP16_GPIO MPP(16, 0x0, 1, 1, 1)
#define MPP16_SATA1_PRESENTn MPP(16, 0x3, 0, 1, 1)
#define MPP16_UA2_TXD MPP(16, 0x4, 0, 1, 1)
#define MPP16_NAND_FLASH_REn3 MPP(16, 0x5, 0, 1, 1)
#define MPP16_TDM_INTn MPP(16, 0x6, 1, 0, 1)
#define MPP16_UNUSED MPP(16, 0x1, 0, 0, 1)
#define MPP17_GPIO MPP(17, 0x0, 1, 1, 1)
#define MPP17_SATA0_PRESENTn MPP(17, 0x3, 0, 1, 1)
#define MPP17_UA2_RXD MPP(17, 0x4, 1, 0, 1)
#define MPP17_NAND_FLASH_WEn3 MPP(17, 0x5, 0, 1, 1)
#define MPP17_TDM_RSTn MPP(17, 0x6, 0, 1, 1)
#define MPP17_UNUSED MPP(17, 0x1, 0, 0, 1)
#define MPP18_GPIO MPP(18, 0x0, 1, 1, 1)
#define MPP18_UA0_CTSn MPP(18, 0x4, 1, 0, 1)
#define MPP18_BOOT_FLASH_REn MPP(18, 0x5, 0, 1, 1)
#define MPP18_UNUSED MPP(18, 0x1, 0, 0, 1)
#define MPP19_GPIO MPP(19, 0x0, 1, 1, 1)
#define MPP19_UA0_CTSn MPP(19, 0x4, 0, 1, 1)
#define MPP19_BOOT_FLASH_WEn MPP(19, 0x5, 0, 1, 1)
#define MPP19_UNUSED MPP(19, 0x1, 0, 0, 1)
#define MPP20_GPIO MPP(20, 0x0, 1, 1, 1)
#define MPP20_UA1_CTSs MPP(20, 0x4, 1, 0, 1)
#define MPP20_TDM_PCLK MPP(20, 0x6, 1, 1, 0)
#define MPP20_UNUSED MPP(20, 0x1, 0, 0, 1)
#define MPP21_GPIO MPP(21, 0x0, 1, 1, 1)
#define MPP21_UA1_CTSs MPP(21, 0x4, 0, 1, 1)
#define MPP21_TDM_FSYNC MPP(21, 0x6, 1, 1, 0)
#define MPP21_UNUSED MPP(21, 0x1, 0, 0, 1)
#define MPP22_GPIO MPP(22, 0x0, 1, 1, 1)
#define MPP22_UA3_TDX MPP(22, 0x4, 0, 1, 1)
#define MPP22_NAND_FLASH_REn2 MPP(22, 0x5, 0, 1, 1)
#define MPP22_TDM_DRX MPP(22, 0x6, 1, 0, 1)
#define MPP22_UNUSED MPP(22, 0x1, 0, 0, 1)
#define MPP23_GPIO MPP(23, 0x0, 1, 1, 1)
#define MPP23_UA3_RDX MPP(23, 0x4, 1, 0, 1)
#define MPP23_NAND_FLASH_WEn2 MPP(23, 0x5, 0, 1, 1)
#define MPP23_TDM_DTX MPP(23, 0x6, 0, 1, 1)
#define MPP23_UNUSED MPP(23, 0x1, 0, 0, 1)
#define MPP24_GPIO MPP(24, 0x0, 1, 1, 1)
#define MPP24_UA2_TXD MPP(24, 0x4, 0, 1, 1)
#define MPP24_TDM_INTn MPP(24, 0x6, 1, 0, 1)
#define MPP24_UNUSED MPP(24, 0x1, 0, 0, 1)
#define MPP25_GPIO MPP(25, 0x0, 1, 1, 1)
#define MPP25_UA2_RXD MPP(25, 0x4, 1, 0, 1)
#define MPP25_TDM_RSTn MPP(25, 0x6, 0, 1, 1)
#define MPP25_UNUSED MPP(25, 0x1, 0, 0, 1)
#define MPP26_GPIO MPP(26, 0x0, 1, 1, 1)
#define MPP26_UA2_CTSn MPP(26, 0x4, 1, 0, 1)
#define MPP26_TDM_PCLK MPP(26, 0x6, 1, 1, 1)
#define MPP26_UNUSED MPP(26, 0x1, 0, 0, 1)
#define MPP27_GPIO MPP(27, 0x0, 1, 1, 1)
#define MPP27_UA2_RTSn MPP(27, 0x4, 0, 1, 1)
#define MPP27_TDM_FSYNC MPP(27, 0x6, 1, 1, 1)
#define MPP27_UNUSED MPP(27, 0x1, 0, 0, 1)
#define MPP28_GPIO MPP(28, 0x0, 1, 1, 1)
#define MPP28_UA3_TXD MPP(28, 0x4, 0, 1, 1)
#define MPP28_TDM_DRX MPP(28, 0x6, 1, 0, 1)
#define MPP28_UNUSED MPP(28, 0x1, 0, 0, 1)
#define MPP29_GPIO MPP(29, 0x0, 1, 1, 1)
#define MPP29_UA3_RXD MPP(29, 0x4, 1, 0, 1)
#define MPP29_SYSRST_OUTn MPP(29, 0x5, 0, 1, 1)
#define MPP29_TDM_DTX MPP(29, 0x6, 0, 1, 1)
#define MPP29_UNUSED MPP(29, 0x1, 0, 0, 1)
#define MPP30_GPIO MPP(30, 0x0, 1, 1, 1)
#define MPP30_UA3_CTSn MPP(30, 0x4, 1, 0, 1)
#define MPP30_UNUSED MPP(30, 0x1, 0, 0, 1)
#define MPP31_GPIO MPP(31, 0x0, 1, 1, 1)
#define MPP31_UA3_RTSn MPP(31, 0x4, 0, 1, 1)
#define MPP31_TDM1_SCSn MPP(31, 0x6, 0, 1, 1)
#define MPP31_UNUSED MPP(31, 0x1, 0, 0, 1)
#define MPP32_GPIO MPP(32, 0x1, 1, 1, 1)
#define MPP32_UA3_TDX MPP(32, 0x4, 0, 1, 1)
#define MPP32_SYSRST_OUTn MPP(32, 0x5, 0, 1, 1)
#define MPP32_TDM0_RXQ MPP(32, 0x6, 0, 1, 1)
#define MPP32_UNUSED MPP(32, 0x3, 0, 0, 1)
#define MPP33_GPIO MPP(33, 0x1, 1, 1, 1)
#define MPP33_UA3_RDX MPP(33, 0x4, 1, 0, 1)
#define MPP33_TDM0_TXQ MPP(33, 0x6, 0, 1, 1)
#define MPP33_UNUSED MPP(33, 0x3, 0, 0, 1)
#define MPP34_GPIO MPP(34, 0x1, 1, 1, 1)
#define MPP34_UA2_TDX MPP(34, 0x4, 0, 1, 1)
#define MPP34_TDM1_RXQ MPP(34, 0x6, 0, 1, 1)
#define MPP34_UNUSED MPP(34, 0x3, 0, 0, 1)
#define MPP35_GPIO MPP(35, 0x1, 1, 1, 1)
#define MPP35_UA2_RDX MPP(35, 0x4, 1, 0, 1)
#define MPP35_TDM1_TXQ MPP(35, 0x6, 0, 1, 1)
#define MPP35_UNUSED MPP(35, 0x3, 0, 0, 1)
#define MPP36_GPIO MPP(36, 0x1, 1, 1, 1)
#define MPP36_UA0_CTSn MPP(36, 0x2, 1, 0, 1)
#define MPP36_UA2_TDX MPP(36, 0x4, 0, 1, 1)
#define MPP36_TDM0_SCSn MPP(36, 0x6, 0, 1, 1)
#define MPP36_UNUSED MPP(36, 0x3, 0, 0, 1)
#define MPP37_GPIO MPP(37, 0x1, 1, 1, 1)
#define MPP37_UA0_RTSn MPP(37, 0x2, 0, 1, 1)
#define MPP37_UA2_RXD MPP(37, 0x4, 1, 0, 1)
#define MPP37_SYSRST_OUTn MPP(37, 0x5, 0, 1, 1)
#define MPP37_TDM_SCLK MPP(37, 0x6, 0, 1, 1)
#define MPP37_UNUSED MPP(37, 0x3, 0, 0, 1)
#define MPP38_GPIO MPP(38, 0x1, 1, 1, 1)
#define MPP38_UA1_CTSn MPP(38, 0x2, 1, 0, 1)
#define MPP38_UA3_TXD MPP(38, 0x4, 0, 1, 1)
#define MPP38_SYSRST_OUTn MPP(38, 0x5, 0, 1, 1)
#define MPP38_TDM_SMOSI MPP(38, 0x6, 0, 1, 1)
#define MPP38_UNUSED MPP(38, 0x3, 0, 0, 1)
#define MPP39_GPIO MPP(39, 0x1, 1, 1, 1)
#define MPP39_UA1_RTSn MPP(39, 0x2, 0, 1, 1)
#define MPP39_UA3_RXD MPP(39, 0x4, 1, 0, 1)
#define MPP39_SYSRST_OUTn MPP(39, 0x5, 0, 1, 1)
#define MPP39_TDM_SMISO MPP(39, 0x6, 1, 0, 1)
#define MPP39_UNUSED MPP(39, 0x3, 0, 0, 1)
#define MPP40_GPIO MPP(40, 0x1, 1, 1, 1)
#define MPP40_TDM_INTn MPP(40, 0x6, 1, 0, 1)
#define MPP40_UNUSED MPP(40, 0x0, 0, 0, 1)
#define MPP41_GPIO MPP(41, 0x1, 1, 1, 1)
#define MPP41_TDM_RSTn MPP(41, 0x6, 0, 1, 1)
#define MPP41_UNUSED MPP(41, 0x0, 0, 0, 1)
#define MPP42_GPIO MPP(42, 0x1, 1, 1, 1)
#define MPP42_TDM_PCLK MPP(42, 0x6, 1, 1, 1)
#define MPP42_UNUSED MPP(42, 0x0, 0, 0, 1)
#define MPP43_GPIO MPP(43, 0x1, 1, 1, 1)
#define MPP43_TDM_FSYNC MPP(43, 0x6, 1, 1, 1)
#define MPP43_UNUSED MPP(43, 0x0, 0, 0, 1)
#define MPP44_GPIO MPP(44, 0x1, 1, 1, 1)
#define MPP44_TDM_DRX MPP(44, 0x6, 1, 0, 1)
#define MPP44_UNUSED MPP(44, 0x0, 0, 0, 1)
#define MPP45_GPIO MPP(45, 0x1, 1, 1, 1)
#define MPP45_SATA0_ACTn MPP(45, 0x3, 0, 1, 1)
#define MPP45_TDM_DRX MPP(45, 0x6, 0, 1, 1)
#define MPP45_UNUSED MPP(45, 0x0, 0, 0, 1)
#define MPP46_GPIO MPP(46, 0x1, 1, 1, 1)
#define MPP46_TDM_SCSn MPP(46, 0x6, 0, 1, 1)
#define MPP46_UNUSED MPP(46, 0x0, 0, 0, 1)
#define MPP47_GPIO MPP(47, 0x1, 1, 1, 1)
#define MPP47_UNUSED MPP(47, 0x0, 0, 0, 1)
#define MPP48_GPIO MPP(48, 0x1, 1, 1, 1)
#define MPP48_SATA1_ACTn MPP(48, 0x3, 0, 1, 1)
#define MPP48_UNUSED MPP(48, 0x2, 0, 0, 1)
#define MPP49_GPIO MPP(49, 0x1, 1, 1, 1)
#define MPP49_SATA0_ACTn MPP(49, 0x3, 0, 1, 1)
#define MPP49_M_BB MPP(49, 0x4, 1, 0, 1)
#define MPP49_UNUSED MPP(49, 0x2, 0, 0, 1)
#define MPP_MAX 49
void mv78xx0_mpp_conf(unsigned int *mpp_list);
#endif

@@ -408,6 +408,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
{
struct twl4030_hsmmc_info *c;
int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
int i;
if (cpu_is_omap2430()) {
control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
@@ -434,7 +435,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
if (!mmc) {
pr_err("Cannot allocate memory for mmc device!\n");
return;
goto done;
}
if (c->name)
@@ -532,6 +533,10 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
continue;
c->dev = mmc->dev;
}
done:
for (i = 0; i < nr_hsmmc; i++)
kfree(hsmmc_data[i]);
}
#endif

@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
# Last update: Thu Jan 28 22:15:54 2010
# Last update: Sat Feb 20 14:16:15 2010
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -2257,7 +2257,7 @@ oratisalog MACH_ORATISALOG ORATISALOG 2268
oratismadi MACH_ORATISMADI ORATISMADI 2269
oratisot16 MACH_ORATISOT16 ORATISOT16 2270
oratisdesk MACH_ORATISDESK ORATISDESK 2271
v2_ca9 MACH_V2P_CA9 V2P_CA9 2272
vexpress MACH_VEXPRESS VEXPRESS 2272
sintexo MACH_SINTEXO SINTEXO 2273
cm3389 MACH_CM3389 CM3389 2274
omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
@@ -2636,3 +2636,45 @@ hw90240 MACH_HW90240 HW90240 2648
dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649
mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650
scat110 MACH_SCAT110 SCAT110 2651
acer_a1 MACH_ACER_A1 ACER_A1 2652
cmcontrol MACH_CMCONTROL CMCONTROL 2653
pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654
rfp43 MACH_RFP43 RFP43 2655
sk86r0301 MACH_SK86R0301 SK86R0301 2656
ctpxa MACH_CTPXA CTPXA 2657
epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658
guruplug MACH_GURUPLUG GURUPLUG 2659
spear310 MACH_SPEAR310 SPEAR310 2660
spear320 MACH_SPEAR320 SPEAR320 2661
robotx MACH_ROBOTX ROBOTX 2662
lsxhl MACH_LSXHL LSXHL 2663
smartlite MACH_SMARTLITE SMARTLITE 2664
cws2 MACH_CWS2 CWS2 2665
m619 MACH_M619 M619 2666
smartview MACH_SMARTVIEW SMARTVIEW 2667
lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668
kizbox MACH_KIZBOX KIZBOX 2669
htccharmer MACH_HTCCHARMER HTCCHARMER 2670
guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671
pm9g45 MACH_PM9G45 PM9G45 2672
htcpanther MACH_HTCPANTHER HTCPANTHER 2673
htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674
reb01 MACH_REB01 REB01 2675
aquila MACH_AQUILA AQUILA 2676
spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
surf7x30 MACH_SURF7X30 SURF7X30 2679
micro2440 MACH_MICRO2440 MICRO2440 2680
am2440 MACH_AM2440 AM2440 2681
tq2440 MACH_TQ2440 TQ2440 2682
lpc2478oem MACH_LPC2478OEM LPC2478OEM 2683
ak880x MACH_AK880X AK880X 2684
cobra3530 MACH_COBRA3530 COBRA3530 2685
pmppb MACH_PMPPB PMPPB 2686
u6715 MACH_U6715 U6715 2687
axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688
g30_dvb MACH_G30_DVB G30_DVB 2689
vc088x MACH_VC088X VC088X 2690
mioa702 MACH_MIOA702 MIOA702 2691
hpmin MACH_HPMIN HPMIN 2692
ak880xak MACH_AK880XAK AK880XAK 2693

@@ -1325,7 +1325,7 @@ struct platform_device *__init
at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
{
struct platform_device *pdev;
struct mci_dma_slave *slave;
struct mci_dma_data *slave;
u32 pioa_mask;
u32 piob_mask;
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
ARRAY_SIZE(atmel_mci0_resource)))
goto fail;
slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
if (!slave)
goto fail;
slave->sdata.dma_dev = &dw_dmac0_device.dev;
slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
if (platform_device_add_data(pdev, data,
sizeof(struct mci_platform_data)))
goto fail;
goto fail_free;
/* CLK line is common to both slots */
pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
/* Slot is unused */
break;
default:
goto fail;
goto fail_free;
}
select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
break;
default:
if (!data->slot[0].bus_width)
goto fail;
goto fail_free;
data->slot[1].bus_width = 0;
break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
platform_device_add(pdev);
return pdev;
fail_free:
kfree(slave);
fail:
data->dma_slave = NULL;
kfree(slave);
platform_device_put(pdev);
return NULL;
}

@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
#define acpi_ht 0 /* no HT-only mode on IA64 */
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }

@@ -201,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#define SET_PERSONALITY(ex) \
set_personality((current->personality & ~PER_MASK) | PER_LINUX)
#define elf_read_implies_exec(ex, executable_stack) \
((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)

@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
microblaze_cache_init();
invalidate_dcache();
enable_dcache();
invalidate_icache();

[File diff suppressed because it is too large]

@@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void)
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
#ifdef __NEED_VMBITS_PROBE
write_c0_entryhi(0x3ffffffffffff000ULL);
write_c0_entryhi(0x3fffffffffffe000ULL);
back_to_back_c0_hazard();
c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL);
c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
#endif
}

@@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
cp0_compare_irq_shift = cp0_compare_irq;
cp0_perfcount_irq = -1;
}

@@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
* Probe Octeon's caches
*
*/
static void __devinit probe_octeon(void)
static void __cpuinit probe_octeon(void)
{
unsigned long icache_size;
unsigned long dcache_size;
@@ -235,7 +235,7 @@ static void __devinit probe_octeon(void)
* Setup the Octeon cache flush routines
*
*/
void __devinit octeon_cache_init(void)
void __cpuinit octeon_cache_init(void)
{
extern unsigned long ebase;
extern char except_vec2_octeon;

@@ -155,7 +155,7 @@ static inline void setup_protection_map(void)
protection_map[15] = PAGE_SHARED;
}
void __devinit cpu_cache_init(void)
void __cpuinit cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void);

@@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void)
if (!rm200_pic_master)
return;
rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
if (!rm200_pic_master) {
if (!rm200_pic_slave) {
iounmap(rm200_pic_master);
return;
}

@@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
tracehook_signal_handler(sig, info, ka, regs, 0);
tracehook_signal_handler(sig, info, ka, regs,
test_thread_flag(TIF_SINGLESTEP) ||
test_thread_flag(TIF_BLOCKSTEP));
return 1;
}

@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
psize = get_slice_psize(mm, addr);
/* Mask the address for the correct page size */
addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
BUG();
psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
} else
} else {
psize = pte_pagesize_index(mm, addr, pte);
/* Mask the address for the standard page size. If we
* have a 64k page kernel, but the hardware does not
* support 64k pages, this might be different from the
* hardware page size encoded in the slice table. */
addr &= PAGE_MASK;
}
/* Mask the address for the correct page size */
addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
/* Build full vaddr */
if (!is_kernel_addr(addr)) {

@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
}
mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
MPIC_BROKEN_FRR_NIRQS,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
of_node_put(np);

@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
__iomem u32 *bptr_vaddr;
struct device_node *np;
int n = 0;
int ioremappable;
WARN_ON (nr < 0 || nr >= NR_CPUS);
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
return;
}
/*
* A secondary core could be in a spinloop in the bootpage
* (0xfffff000), somewhere in highmem, or somewhere in lowmem.
* The bootpage and highmem can be accessed via ioremap(), but
* we need to directly access the spinloop if its in lowmem.
*/
ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
/* Map the spin table */
bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
if (ioremappable)
bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
else
bptr_vaddr = phys_to_virt(*cpu_rel_addr);
local_irq_save(flags);
out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
if (!ioremappable)
flush_dcache_range((ulong)bptr_vaddr,
(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
/* Wait a bit for the CPU to ack. */
while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
mdelay(1);
local_irq_restore(flags);
iounmap(bptr_vaddr);
if (ioremappable)
iounmap(bptr_vaddr);
pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
}

@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
{
struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
BUG_ON(os_cppr->index != 0);
/*
* we only really want to set the priority when there's
* just one cppr value on the stack
*/
WARN_ON(os_cppr->index != 0);
os_cppr->stack[os_cppr->index] = cppr;
os_cppr->stack[0] = cppr;
if (firmware_has_feature(FW_FEATURE_LPAR))
lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
void xics_teardown_cpu(void)
{
struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
int cpu = smp_processor_id();
/*
* we have to reset the cppr index to 0 because we're
* not going to return from the IPI
*/
os_cppr->index = 0;
xics_set_cpu_priority(0);
/* Clear any pending IPI request */

@@ -293,12 +293,12 @@ struct _lowcore
__u64 clock_comparator; /* 0x02d0 */
__u32 machine_flags; /* 0x02d8 */
__u32 ftrace_func; /* 0x02dc */
__u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */
__u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
/* Interrupt response block */
__u8 irb[64]; /* 0x0300 */
__u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */
__u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
/*
* 0xe00 contains the address of the IPL Parameter Information

@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
mov #1, r5
call_handle_tlbmiss:
setup_frame_reg
mov.l 1f, r0
mov r5, r8
mov.l @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
mov.l @k2, k2 ! read out vector and keep in k2
handle_exception_special:
setup_frame_reg
! Setup return address and jump to exception handler
mov.l 7f, r9 ! fetch return address
stc r2_bank, r0 ! k2 (vector)

@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
mempool_free(frame, dwarf_frame_pool);
}
extern void ret_from_irq(void);
/**
* dwarf_unwind_stack - unwind the stack
*
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
addr = frame->cfa + reg->addr;
frame->return_addr = __raw_readl(addr);
/*
* Ah, the joys of unwinding through interrupts.
*
* Interrupts are tricky - the DWARF info needs to be _really_
* accurate and unfortunately I'm seeing a lot of bogus DWARF
* info. For example, I've seen interrupts occur in epilogues
* just after the frame pointer (r14) had been restored. The
* problem was that the DWARF info claimed that the CFA could be
* reached by using the value of the frame pointer before it was
* restored.
*
* So until the compiler can be trusted to produce reliable
* DWARF info when it really matters, let's stop unwinding once
* we've calculated the function that was interrupted.
*/
if (prev && prev->pc == (unsigned long)ret_from_irq)
frame->return_addr = 0;
return frame;
bail:

@@ -70,8 +70,14 @@ ret_from_exception:
CFI_STARTPROC simple
CFI_DEF_CFA r14, 0
CFI_REL_OFFSET 17, 64
CFI_REL_OFFSET 15, 0
CFI_REL_OFFSET 15, 60
CFI_REL_OFFSET 14, 56
CFI_REL_OFFSET 13, 52
CFI_REL_OFFSET 12, 48
CFI_REL_OFFSET 11, 44
CFI_REL_OFFSET 10, 40
CFI_REL_OFFSET 9, 36
CFI_REL_OFFSET 8, 32
preempt_stop()
ENTRY(ret_from_irq)
!

@@ -133,6 +133,8 @@ void user_enable_single_step(struct task_struct *child)
struct pt_regs *regs = child->thread.uregs;
regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
@@ -140,6 +142,8 @@ void user_disable_single_step(struct task_struct *child)
struct pt_regs *regs = child->thread.uregs;
regs->sr &= ~SR_SSTEP;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
static int genregs_get(struct task_struct *target,
@@ -454,6 +458,8 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
regs->regs[9]);
@@ -461,8 +467,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[9]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
/* Called with interrupts disabled */

@@ -118,7 +118,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs, 0);
tracehook_signal_handler(signr, &info, &ka, regs,
test_thread_flag(TIF_SINGLESTEP));
return 1;
}
}

@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
* Set some valid stack frames to give to the child.
*/
childstack = (struct sparc_stackf __user *)
(sp & ~0x7UL);
(sp & ~0xfUL);
parentstack = (struct sparc_stackf __user *)
regs->u_regs[UREG_FP];

@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
/* Now 8-byte align the stack as this is mandatory in the
* Sparc ABI due to how register windows work. This hides
* the restriction from thread libraries etc. -DaveM
/* Now align the stack as this is mandatory in the Sparc ABI
* due to how register windows work. This hides the
* restriction from thread libraries etc.
*/
csp &= ~7UL;
csp &= ~15UL;
distance = fp - psp;
rval = (csp - distance);

@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
};
/* Align macros */
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
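/*
 * For illustration (not part of the patch): (size + 15) & ~15 rounds
 * the frame size up to the next multiple of 16, e.g. 0x1f4 -> 0x200,
 * matching the 16-byte stack alignment the other sparc hunks in this
 * merge enforce (csp &= ~15UL, frame pointers checked against & 15).
 */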
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
sp = current->sas_ss_sp + current->sas_ss_size;
}
sp -= framesize;
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~7UL;
sp &= ~15UL;
return (void __user *)(sp - framesize);
return (void __user *) sp;
}
static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)

@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
sp = current->sas_ss_sp + current->sas_ss_size;
}
sp -= framesize;
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~7UL;
sp &= ~15UL;
return (void __user *)(sp - framesize);
return (void __user *) sp;
}
static inline int

@@ -353,7 +353,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
/* Checks if the fp is valid */
static int invalid_frame_pointer(void __user *fp, int fplen)
{
if (((unsigned long) fp) & 7)
if (((unsigned long) fp) & 15)
return 1;
return 0;
}
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
sp = current->sas_ss_sp + current->sas_ss_size;
}
sp -= framesize;
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~7UL;
sp &= ~15UL;
return (void __user *)(sp - framesize);
return (void __user *) sp;
}
static inline void

@@ -170,10 +170,7 @@ static inline void elf_common_init(struct thread_struct *t,
}
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
elf_common_init(&current->thread, _r, 0); \
clear_thread_flag(TIF_IA32); \
} while (0)
elf_common_init(&current->thread, _r, 0)
#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
elf_common_init(&current->thread, regs, __USER_DS)

@@ -11,9 +11,9 @@
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
#else
#else /* else it's non-compat x86-64 */
# define AT_VECTOR_SIZE_ARCH 1
#endif

@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void)
if (!error) {
acpi_lapic = 1;
#ifdef CONFIG_X86_BIGSMP
generic_bigsmp_probe();
#endif
/*
* Parse MADT IO-APIC entries
*/
@@ -1197,8 +1194,6 @@
acpi_ioapic = 1;
smp_found_config = 1;
if (apic->setup_apic_routing)
apic->setup_apic_routing();
}
}
if (error == -EINVAL) {
@@ -1347,14 +1342,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
},
},
{
.callback = force_acpi_ht,
.ident = "ASUS P2B-DS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
},
},
{
.callback = force_acpi_ht,
.ident = "ASUS CUR-DLS",

@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
#endif
enable_IR_x2apic();
#ifdef CONFIG_X86_64
default_setup_apic_routing();
#endif
verify_local_APIC();
connect_bsp_APIC();
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
if (apicid > max_physical_apicid)
max_physical_apicid = apicid;
#ifdef CONFIG_X86_32
if (num_processors > 8) {
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
if (!APIC_XAPIC(version)) {
def_to_bigsmp = 0;
break;
}
/* If P4 and above fall through */
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
}
}
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;

@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
}
late_initcall(print_ipi_mode);
void default_setup_apic_routing(void)
void __init default_setup_apic_routing(void)
{
int version = apic_version[boot_cpu_physical_apicid];
if (num_possible_cpus() > 8) {
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
if (!APIC_XAPIC(version)) {
def_to_bigsmp = 0;
break;
}
/* If P4 and above fall through */
case X86_VENDOR_AMD:
def_to_bigsmp = 1;
}
}
#ifdef CONFIG_X86_BIGSMP
generic_bigsmp_probe();
#endif
if (apic->setup_apic_routing)
apic->setup_apic_routing();
}
static void setup_apic_flat_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
.init_apic_ldr = default_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
.setup_apic_routing = default_setup_apic_routing,
.setup_apic_routing = setup_apic_flat_routing,
.multi_timer_check = NULL,
.apicid_to_node = default_apicid_to_node,
.cpu_to_logical_apicid = default_cpu_to_logical_apicid,

@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void)
}
#endif
if (apic == &apic_flat && num_processors > 8)
if (apic == &apic_flat && num_possible_cpus() > 8)
apic = &apic_physflat;
printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);

@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
per_cpu(powernow_data, pol->cpu) = NULL;
return 0;
}
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
int err;
if (!data)
return -EINVAL;
return 0;
smp_call_function_single(cpu, query_values_on_cpu, &err, true);
if (err)

@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
x86_init.mpparse.mpc_record(1);
}
#ifdef CONFIG_X86_BIGSMP
generic_bigsmp_probe();
#endif
if (apic->setup_apic_routing)
apic->setup_apic_routing();
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;

@@ -527,6 +527,7 @@ void set_personality_ia32(void)
/* Make sure to be in 32bit mode */
set_thread_flag(TIF_IA32);
current->personality |= force_personality32;
/* Prepare the first "return" to user space */
current_thread_info()->status |= TS_COMPAT;

@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
set_cpu_sibling_map(0);
enable_IR_x2apic();
#ifdef CONFIG_X86_64
default_setup_apic_routing();
#endif
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");

@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
return -EOPNOTSUPP;
addr &= KVM_PIT_CHANNEL_MASK;
if (addr == 3)
return 0;
s = &pit_state->channels[addr];
mutex_lock(&pit_state->lock);

@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
static int version;
struct pvclock_wall_clock wc;
struct timespec now, sys, boot;
struct timespec boot;
if (!wall_clock)
return;
@@ -685,9 +685,7 @@
* wall clock specified here. guest system time equals host
* system time for us, thus we must fill in host boot time here.
*/
now = current_kernel_time();
ktime_get_ts(&sys);
boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
getboottime(&boot);
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
local_irq_save(flags);
kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
ktime_get_ts(&ts);
monotonic_to_bootbased(&ts);
local_irq_restore(flags);
/* With all the info we got, fill in the values */

@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
#else
/*
* With get_user_pages_fast, we walk down the pagetables without taking
* any locks. For this we would like to load the pointers atoimcally,
* any locks. For this we would like to load the pointers atomically,
* but that is not possible (without expensive cmpxchg8b) on PAE. What
* we do have is the guarantee that a pte will only either go from not
* present to present, or present to not present or both -- it will not

@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
*/
#define CFQ_MIN_TT (2)
/*
* Allow merged cfqqs to perform this amount of seeky I/O before
* deciding to break the queues up again.
*/
#define CFQQ_COOP_TOUT (HZ)
#define CFQ_SLICE_SCALE (5)
#define CFQ_HW_QUEUE_MIN (5)
#define CFQ_SERVICE_SHIFT 12
#define CFQQ_SEEK_THR 8 * 1024
#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
u64 seek_total;
sector_t seek_mean;
sector_t last_request_pos;
unsigned long seeky_start;
pid_t pid;
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
CFQ_CFQQ_FLAG_sync, /* synchronous queue */
CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
};
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
@@ -1565,6 +1563,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_request(cfqq);
cfq_clear_cfqq_wait_busy(cfqq);
/*
* If this cfqq is shared between multiple processes, check to
* make sure that those processes are still issuing I/Os within
* the mean seek distance. If not, it may be time to break the
* queues apart again.
*/
if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
cfq_mark_cfqq_split_coop(cfqq);
/*
* store what was left of this slice, if the queue idled/timed out
*/
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
return cfqd->last_position - blk_rq_pos(rq);
}
#define CFQQ_SEEK_THR 8 * 1024
#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq, bool for_preempt)
{
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
total = cfqq->seek_total + (cfqq->seek_samples/2);
do_div(total, cfqq->seek_samples);
cfqq->seek_mean = (sector_t)total;
/*
* If this cfqq is shared between multiple processes, check to
* make sure that those processes are still issuing I/Os within
* the mean seek distance. If not, it may be time to break the
* queues apart again.
*/
if (cfq_cfqq_coop(cfqq)) {
if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
cfqq->seeky_start = jiffies;
else if (!CFQQ_SEEKY(cfqq))
cfqq->seeky_start = 0;
}
}
/*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
return cic_to_cfqq(cic, 1);
}
static int should_split_cfqq(struct cfq_queue *cfqq)
{
if (cfqq->seeky_start &&
time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
return 1;
return 0;
}
/*
* Returns NULL if a new cfqq should be allocated, or the old cfqq if this
* was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@
split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
{
if (cfqq_process_refs(cfqq) == 1) {
cfqq->seeky_start = 0;
cfqq->pid = current->pid;
cfq_clear_cfqq_coop(cfqq);
cfq_clear_cfqq_split_coop(cfqq);
return cfqq;
}
@@ -3510,7 +3493,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
/*
* If the queue was seeky for too long, break it apart.
*/
if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
cfqq = split_cfqq(cic, cfqq);
if (!cfqq)

@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
struct platform_device *dd;
id = dock_station_count;
memset(&ds, 0, sizeof(ds));
dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
if (IS_ERR(dd))
return PTR_ERR(dd);

@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
{ set_max_cstate, "Pavilion zv5000", {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
(void *)1},
{ set_max_cstate, "Asus L8400B", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
{},
};

@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
return status;
}
static int early_pdc_done;
void acpi_processor_set_pdc(acpi_handle handle)
{
struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
if (arch_has_acpi_pdc() == false)
return;
if (early_pdc_done)
return;
obj_list = acpi_processor_alloc_pdc();
if (!obj_list)
return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
return 0;
}
static int param_early_pdc_optin(char *s)
{
early_pdc_optin = 1;
return 1;
}
__setup("acpi_early_pdc_eval", param_early_pdc_optin);
static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
{
set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
early_pdc_done = 1;
}

@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
if (child)
*child = device;
return 0;
if (device)
return 0;
else
return -ENODEV;
}
/*
* acpi_bus_add and acpi_bus_start
*
* scan a given ACPI tree (probably recently hot-plugged) and
* create and add, or start, the devices found.
*
* If no devices were found, -ENODEV is returned. That is not
* necessarily a real error: there may simply have been no suitable
* ACPI objects in the table trunk from which the kernel could create
* a device and add/start an appropriate driver.
*/
int
acpi_bus_add(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
memset(&ops, 0, sizeof(ops));
ops.acpi_op_add = 1;
acpi_bus_scan(handle, &ops, child);
return 0;
return acpi_bus_scan(handle, &ops, child);
}
EXPORT_SYMBOL(acpi_bus_add);
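/*
 * Hypothetical caller sketch, illustration only (not from this patch;
 * ACPI_BUS_TYPE_DEVICE stands in for whatever type code the caller
 * uses): a hot-plug path can now propagate the scan result instead of
 * assuming success.
 *
 *	struct acpi_device *dev;
 *	int err = acpi_bus_add(&dev, parent, handle, ACPI_BUS_TYPE_DEVICE);
 *	if (err)	// -ENODEV: no device could be created
 *		return err;
 */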
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
{
struct acpi_bus_ops ops;
if (!device)
return -EINVAL;
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
acpi_bus_scan(device->handle, &ops, NULL);
return 0;
return acpi_bus_scan(device->handle, &ops, NULL);
}
EXPORT_SYMBOL(acpi_bus_start);

@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
if (acpi_disabled)
if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
if (acpi_disabled)
if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)

@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
},
.driver_data = "F.23", /* cutoff BIOS version */
},
/*
* Acer eMachines G725 has the same problem. BIOS
* V1.03 is known to be broken. V3.04 is known to
work. In between, there are V1.06, V2.06 and V3.03
* that we don't have much idea about. For now,
* blacklist anything older than V3.04.
*/
{
.ident = "G725",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
},
.driver_data = "V3.04", /* cutoff BIOS version */
},
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
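The cutoff comparison implied by driver_data works the same way as for the HP entry above: dmi_first_match() picks the entry and the running BIOS version string is ordered against the cutoff. A sketch under that assumption (illustrative names; the real check lives in ahci_broken_suspend()):

static bool example_bios_is_broken(const char *sys_ver, const char *cutoff)
{
	/* "V1.03" sorts before "V3.04" under strcmp(), so older BIOSes match */
	return strcmp(sys_ver, cutoff) < 0;
}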

View file

@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* write indication (used for PIO/DMA setup), result TF is
* copied back and we don't whine too much about its failure.
*/
tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE)
tf->flags |= ATA_TFLAG_WRITE;

View file

@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}
if (!do_write)
flush_dcache_page(page);
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;

View file

@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
kfree(cp);
}
static struct sysfs_ops class_sysfs_ops = {

View file

@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
if (*pos > h->highest_lun)
return 0;
if (drv == NULL) /* it's possible for h->drv[] to have holes. */
return 0;
if (drv->heads == 0)
return 0;

View file

@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
exit:
sdio_release_host(card->func);
kfree(tmpbuf);
return ret;
}

View file

@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
static int __init agp_amd64_mod_init(void)
{
#ifndef MODULE
if (gart_iommu_aperture)
return agp_bridges_found ? 0 : -ENODEV;
#endif
return agp_amd64_init();
}
static void __exit agp_amd64_cleanup(void)
{
#ifndef MODULE
if (gart_iommu_aperture)
return;
#endif
if (aperture_resource)
release_resource(aperture_resource);
pci_unregister_driver(&agp_amd64_pci_driver);

View file

@ -39,12 +39,12 @@
struct tpm_inf_dev {
int iotype;
void __iomem *mem_base; /* MMIO ioremap'd addr */
unsigned long map_base; /* phys MMIO base */
unsigned long map_size; /* MMIO region size */
unsigned int index_off; /* index register offset */
void __iomem *mem_base; /* MMIO ioremap'd addr */
unsigned long map_base; /* phys MMIO base */
unsigned long map_size; /* MMIO region size */
unsigned int index_off; /* index register offset */
unsigned int data_regs; /* Data registers */
unsigned int data_regs; /* Data registers */
unsigned int data_size;
unsigned int config_port; /* IO Port config index reg */
@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
.miscdev = {.fops = &inf_ops,},
};
static const struct pnp_device_id tpm_pnp_tbl[] = {
static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
/* Infineon TPMs */
{"IFX0101", 0},
{"IFX0102", 0},
{"", 0}
};
MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
!(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
tpm_dev.iotype = TPM_INF_IO_PORT;
tpm_dev.iotype = TPM_INF_IO_PORT;
tpm_dev.config_port = pnp_port_start(dev, 0);
tpm_dev.config_size = pnp_port_len(dev, 0);
@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
goto err_last;
}
} else if (pnp_mem_valid(dev, 0) &&
!(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
!(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
tpm_dev.iotype = TPM_INF_IO_MEM;
tpm_dev.iotype = TPM_INF_IO_MEM;
tpm_dev.map_base = pnp_mem_start(dev, 0);
tpm_dev.map_size = pnp_mem_len(dev, 0);
@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
"product id 0x%02x%02x"
"%s\n",
tpm_dev.iotype == TPM_INF_IO_PORT ?
tpm_dev.config_port :
tpm_dev.map_base + tpm_dev.index_off,
tpm_dev.config_port :
tpm_dev.map_base + tpm_dev.index_off,
tpm_dev.iotype == TPM_INF_IO_PORT ?
tpm_dev.data_regs :
tpm_dev.map_base + tpm_dev.data_regs,
tpm_dev.data_regs :
tpm_dev.map_base + tpm_dev.data_regs,
version[0], version[1],
vendorid[0], vendorid[1],
productid[0], productid[1], chipname);
@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
iounmap(tpm_dev.mem_base);
release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
}
tpm_dev_vendor_release(chip);
tpm_remove_hardware(chip->dev);
}
}
static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
{
struct tpm_chip *chip = pnp_get_drvdata(dev);
int rc;
if (chip) {
u8 savestate[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 10, /* blob length (in bytes) */
0, 0, 0, 152 /* TPM_ORD_SaveState */
};
dev_info(&dev->dev, "saving TPM state\n");
rc = tpm_inf_send(chip, savestate, sizeof(savestate));
if (rc < 0) {
dev_err(&dev->dev, "error while saving TPM state\n");
return rc;
}
}
return 0;
}
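Decoded, the savestate blob is a raw big-endian TPM 1.2 request: {0, 193} is the tag 0x00C1 (TPM_TAG_RQU_COMMAND), {0, 0, 0, 10} the total length 0x0000000A covering the blob's own ten bytes, and {0, 0, 0, 152} the ordinal 0x00000098 (TPM_ORD_SaveState); the command carries no operands beyond the header.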
static int tpm_inf_pnp_resume(struct pnp_dev *dev)
{
/* Re-configure TPM after suspending */
tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
tpm_config_out(IOLIMH, TPM_INF_ADDR);
tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
tpm_config_out(IOLIML, TPM_INF_ADDR);
tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
/* activate register */
tpm_config_out(TPM_DAR, TPM_INF_ADDR);
tpm_config_out(0x01, TPM_INF_DATA);
tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
/* disable RESET, LP and IRQC */
tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
return tpm_pm_resume(&dev->dev);
}
static struct pnp_driver tpm_inf_pnp_driver = {
.name = "tpm_inf_pnp",
.driver = {
.owner = THIS_MODULE,
.suspend = tpm_pm_suspend,
.resume = tpm_pm_resume,
},
.id_table = tpm_pnp_tbl,
.id_table = tpm_inf_pnp_tbl,
.probe = tpm_inf_pnp_probe,
.remove = __devexit_p(tpm_inf_pnp_remove),
.suspend = tpm_inf_pnp_suspend,
.resume = tpm_inf_pnp_resume,
.remove = __devexit_p(tpm_inf_pnp_remove)
};
static int __init init_inf(void)
@ -638,5 +673,5 @@ module_exit(cleanup_inf);
MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
MODULE_VERSION("1.9");
MODULE_VERSION("1.9.2");
MODULE_LICENSE("GPL");

View file

@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
pid = task_pid(current);
type = PIDTYPE_PID;
}
retval = __f_setown(filp, pid, type, 0);
get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
retval = __f_setown(filp, pid, type, 0);
put_pid(pid);
if (retval)
goto out;
} else {

View file

@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
(dbs_tuners_ins.up_threshold -
dbs_tuners_ins.down_differential);
if (freq_next < policy->min)
freq_next = policy->min;
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);

View file

@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
cohd_fin->pending_irqs--;
cohc->completed = cohd_fin->desc.cookie;
BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
if (cohc->nbr_active_done == 0)
return;

View file

@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
device_unregister(&chan->dev->device);
free_percpu(chan->local);
}
}
EXPORT_SYMBOL(dma_async_device_unregister);

View file

@ -467,7 +467,7 @@ static int dmatest_func(void *data)
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
interruptible_sleep_on(&wait_dmatest_exit);
}

View file

@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
if (end && time_after(jiffies, end)) {
if (tmo && time_after(jiffies, end)) {
err = -ETIMEDOUT;
break;
}

View file

@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
* @buffer_n: buffer number to update.
* 0 or 1 are the only valid values.
* @phyaddr: buffer physical address.
* @return: Returns 0 on success or negative error code on failure. This
* function will fail if the buffer is set to ready.
*/
/* Called under spin_lock(_irqsave)(&ichan->lock) */
static int ipu_update_channel_buffer(struct idmac_channel *ichan,
int buffer_n, dma_addr_t phyaddr)
static void ipu_update_channel_buffer(struct idmac_channel *ichan,
int buffer_n, dma_addr_t phyaddr)
{
enum ipu_channel channel = ichan->dma_chan.chan_id;
uint32_t reg;
@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
}
spin_unlock_irqrestore(&ipu_data.lock, flags);
return 0;
}
/* Called under spin_lock_irqsave(&ichan->lock) */
@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
{
unsigned int chan_id = ichan->dma_chan.chan_id;
struct device *dev = &ichan->dma_chan.dev->device;
int ret;
if (async_tx_test_ack(&desc->txd))
return -EINTR;
@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
* could make it conditional on status >= IPU_CHANNEL_ENABLED, but
* doing it again shouldn't hurt either.
*/
ret = ipu_update_channel_buffer(ichan, buf_idx,
sg_dma_address(sg));
if (ret < 0) {
dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
sg, chan_id, buf_idx);
return ret;
}
ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
ipu_select_buffer(chan_id, buf_idx);
dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
callback = desc->txd.callback;
callback_param = desc->txd.callback_param;
callback = descnew->txd.callback;
callback_param = descnew->txd.callback_param;
spin_unlock(&ichan->lock);
callback(callback_param);
if (callback)
callback(callback_param);
spin_lock(&ichan->lock);
}

View file

@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
 * the memory system completely. A command line option allows one to force-enable
* hardware ECC later in amd64_enable_ecc_error_reporting().
*/
static const char *ecc_warning =
"WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
" Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
" Also, use of the override can cause unknown side effects.\n";
static const char *ecc_msg =
"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
" Either enable ECC checking or force module loading by setting "
"'ecc_enable_override'.\n"
" (Note that use of the override may cause unknown side effects.)\n";
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
if (!ecc_enabled)
amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
"is currently disabled, set F3x%x[22] (%s).\n",
K8_NBCFG, pci_name(pvt->misc_f3_ctl));
else
@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
if (!nb_mce_en)
amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
"0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, pvt->mc_node_id);
if (!ecc_enabled || !nb_mce_en) {
if (!ecc_enable_override) {
amd64_printk(KERN_WARNING, "%s", ecc_warning);
amd64_printk(KERN_NOTICE, "%s", ecc_msg);
return -ENODEV;
}
ecc_enable_override = 0;

View file

@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
end <<= (24 - PAGE_SHIFT);
end |= (1 << (24 - PAGE_SHIFT)) - 1;
csrow->first_page = start >> PAGE_SHIFT;
csrow->last_page = end >> PAGE_SHIFT;
csrow->first_page = start;
csrow->last_page = end;
csrow->nr_pages = end + 1 - start;
csrow->grain = 8;
csrow->mtype = mtype;
@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
mpc85xx_init_csrows(mci);
#ifdef CONFIG_EDAC_DEBUG
edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
#endif
/* store the original error disable bits */
orig_ddr_err_disable =
in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);

View file

@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
static struct kmem_cache *fwnet_packet_task_cache;
static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
{
dev_kfree_skb_any(ptask->skb);
kmem_cache_free(fwnet_packet_task_cache, ptask);
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask);
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev;
struct fwnet_device *dev = ptask->dev;
unsigned long flags;
dev = ptask->dev;
bool free;
spin_lock_irqsave(&dev->lock, flags);
list_del(&ptask->pt_link);
spin_unlock_irqrestore(&dev->lock, flags);
ptask->outstanding_pkts--; /* FIXME access inside lock */
ptask->outstanding_pkts--;
/* Check whether we or the networking TX soft-IRQ is the last user. */
free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
if (ptask->outstanding_pkts == 0)
list_del(&ptask->pt_link);
spin_unlock_irqrestore(&dev->lock, flags);
if (ptask->outstanding_pkts > 0) {
u16 dg_size;
@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
}
fwnet_send_packet(ptask);
} else {
dev_kfree_skb_any(ptask->skb);
kmem_cache_free(fwnet_packet_task_cache, ptask);
}
if (free)
fwnet_free_ptask(ptask);
}
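The crux of the fix is that the last-user decision is taken while dev->lock is held, so of the two racing paths (this completion handler and fwnet_send_packet() below) at most one can see it come out true. A generic sketch of the pattern, with illustrative names rather than the driver's own:

	bool last;

	spin_lock_irqsave(&lock, flags);
	last = (--obj->users == 0);	/* ownership decided under the lock */
	spin_unlock_irqrestore(&lock, flags);

	if (last)
		kfree(obj);		/* at most one path reaches this */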
static void fwnet_write_complete(struct fw_card *card, int rcode,
@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
unsigned tx_len;
struct rfc2734_header *bufhdr;
unsigned long flags;
bool free;
dev = ptask->dev;
tx_len = ptask->max_payload;
@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
generation, SCODE_100, 0ULL, ptask->skb->data,
tx_len + 8, fwnet_write_complete, ptask);
/* FIXME race? */
spin_lock_irqsave(&dev->lock, flags);
list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
/* If the AT tasklet already ran, we may be the last user. */
free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
if (!free)
list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
goto out;
}
fw_send_request(dev->card, &ptask->transaction,
@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
ptask->generation, ptask->speed, ptask->fifo_addr,
ptask->skb->data, tx_len, fwnet_write_complete, ptask);
/* FIXME race? */
spin_lock_irqsave(&dev->lock, flags);
list_add_tail(&ptask->pt_link, &dev->sent_list);
/* If the AT tasklet already ran, we may be the last user. */
free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
if (!free)
list_add_tail(&ptask->pt_link, &dev->sent_list);
spin_unlock_irqrestore(&dev->lock, flags);
dev->netdev->trans_start = jiffies;
out:
if (free)
fwnet_free_ptask(ptask);
return 0;
}
@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
spin_unlock_irqrestore(&dev->lock, flags);
ptask->max_payload = max_payload;
INIT_LIST_HEAD(&ptask->pt_link);
fwnet_send_packet(ptask);
return NETDEV_TX_OK;

View file

@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
/*
* FIXME: Cycle lost behavior should be configurable: lose
* packet, retransmit or terminate..
*/
p = packet;
payload_index = payload;
@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
if (!p->skip) {
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].req_count = cpu_to_le16(8);
/*
* Link the skip address to this descriptor itself. This causes
* a context to skip a cycle whenever lost cycles or FIFO
* overruns occur, without dropping the data. The application
* should then decide whether this is an error condition or not.
* FIXME: Make the context's cycle-lost behaviour configurable?
*/
d[0].branch_address = cpu_to_le32(d_bus | z);
header = (__le32 *) &d[1];
header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |

View file

@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
gart_info->table_mask);
(unsigned long long)gart_info->table_mask);
ret = 1;
goto done;
}

View file

@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
return mode;
}
/*
* EDID is delightfully ambiguous about how interlaced modes are to be
* encoded. Our internal representation is of frame height, but some
* HDTV detailed timings are encoded as field height.
*
* The format list here is from CEA, in frame size. Technically we
* should be checking refresh rate too. Whatever.
*/
static void
drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
struct detailed_pixel_timing *pt)
{
int i;
static const struct {
int w, h;
} cea_interlaced[] = {
{ 1920, 1080 },
{ 720, 480 },
{ 1440, 480 },
{ 2880, 480 },
{ 720, 576 },
{ 1440, 576 },
{ 2880, 576 },
};
static const int n_sizes =
sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
for (i = 0; i < n_sizes; i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
mode->vsync_start *= 2;
mode->vsync_end *= 2;
mode->vtotal *= 2;
mode->vtotal |= 1;
}
}
mode->flags |= DRM_MODE_FLAG_INTERLACE;
}
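Worked example (illustrative numbers): a CEA 1080i detailed timing encoded as field height arrives as 1920x540 with, say, vtotal 562; the quirk doubles vdisplay, vsync_start, vsync_end and vtotal back to the 1920x1080 frame, and the final vtotal |= 1 restores the odd 1125-line total an interlaced frame needs.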
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_set_name(mode);
if (pt->misc & DRM_EDID_PT_INTERLACED)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;

View file

@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
wasted += alignment - tmp;
}
if (entry->size >= size + wasted) {
if (entry->size >= size + wasted &&
(entry->start + wasted + size) <= end) {
if (!best_match)
return entry;
if (entry->size < best_size) {

View file

@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
if (cliprects == NULL)
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
}
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *

View file

@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
const static struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
.has_pipe_cxsr = 1,
.need_gfx_hws = 1,
.has_hotplug = 1,
};
@ -174,12 +174,42 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
static int i915_suspend(struct drm_device *dev, pm_message_t state)
static int i915_drm_freeze(struct drm_device *dev)
{
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
int error = i915_gem_idle(dev);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
return error;
}
drm_irq_uninstall(dev);
}
i915_save_state(dev);
return 0;
}
static void i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev || !dev_priv) {
DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
intel_opregion_free(dev, 1);
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
}
static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
int error;
if (!dev || !dev->dev_private) {
DRM_ERROR("dev: %p\n", dev);
DRM_ERROR("DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
@ -187,19 +217,11 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
if (state.event == PM_EVENT_PRETHAW)
return 0;
pci_save_state(dev->pdev);
error = i915_drm_freeze(dev);
if (error)
return error;
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
if (i915_gem_idle(dev))
dev_err(&dev->pdev->dev,
"GEM idle failed, resume may fail\n");
drm_irq_uninstall(dev);
}
i915_save_state(dev);
intel_opregion_free(dev, 1);
i915_drm_suspend(dev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
@ -207,45 +229,45 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_set_power_state(dev->pdev, PCI_D3hot);
}
/* Modeset on resume, not lid events */
dev_priv->modeset_on_lid = 0;
return 0;
}
static int i915_resume(struct drm_device *dev)
static int i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
if (pci_enable_device(dev->pdev))
return -1;
pci_set_master(dev->pdev);
i915_restore_state(dev);
intel_opregion_init(dev, 1);
int error = 0;
/* KMS EnterVT equivalent */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
ret = i915_gem_init_ringbuffer(dev);
if (ret != 0)
ret = -1;
error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
}
dev_priv->modeset_on_lid = 0;
return ret;
return error;
}
static int i915_resume(struct drm_device *dev)
{
if (pci_enable_device(dev->pdev))
return -EIO;
pci_set_master(dev->pdev);
i915_restore_state(dev);
intel_opregion_init(dev, 1);
return i915_drm_thaw(dev);
}
/**
@ -386,57 +408,69 @@ i915_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
static int
i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
static int i915_pm_suspend(struct device *dev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int error;
return i915_suspend(dev, state);
}
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
static int
i915_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
error = i915_drm_freeze(drm_dev);
if (error)
return error;
return i915_resume(dev);
}
i915_drm_suspend(drm_dev);
static int
i915_pm_suspend(struct device *dev)
{
return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
}
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
static int
i915_pm_resume(struct device *dev)
{
return i915_pci_resume(to_pci_dev(dev));
}
static int
i915_pm_freeze(struct device *dev)
{
return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
}
static int
i915_pm_thaw(struct device *dev)
{
/* thaw during hibernate, do nothing! */
return 0;
}
static int
i915_pm_poweroff(struct device *dev)
static int i915_pm_resume(struct device *dev)
{
return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_resume(drm_dev);
}
static int
i915_pm_restore(struct device *dev)
static int i915_pm_freeze(struct device *dev)
{
return i915_pci_resume(to_pci_dev(dev));
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
if (!drm_dev || !drm_dev->dev_private) {
dev_err(dev, "DRM not initialized, aborting suspend.\n");
return -ENODEV;
}
return i915_drm_freeze(drm_dev);
}
static int i915_pm_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
return i915_drm_thaw(drm_dev);
}
static int i915_pm_poweroff(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int error;
error = i915_drm_freeze(drm_dev);
if (!error)
i915_drm_suspend(drm_dev);
return error;
}
const struct dev_pm_ops i915_pm_ops = {
@ -445,7 +479,7 @@ const struct dev_pm_ops i915_pm_ops = {
.freeze = i915_pm_freeze,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
.restore = i915_pm_restore,
.restore = i915_pm_resume,
};
static struct vm_operations_struct i915_gem_vm_ops = {

View file

@ -492,6 +492,15 @@ typedef struct drm_i915_private {
*/
struct list_head flushing_list;
/**
* List of objects currently pending a GPU write flush.
*
* All elements on this list will belong to either the
 * active_list or the flushing_list; last_rendering_seqno can
 * be used to differentiate between the two.
*/
struct list_head gpu_write_list;
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
@ -592,6 +601,8 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;

View file

@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
BUG_ON(!list_empty(&obj_priv->gpu_write_list));
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
&dev_priv->mm.flushing_list, list) {
&dev_priv->mm.gpu_write_list,
gpu_write_list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@ -2084,8 +2088,8 @@ static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
int ret;
uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
obj->write_domain = 0;
BUG_ON(obj->write_domain);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@ -3564,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t reloc_count = 0, i;
int ret = 0;
if (relocs == NULL)
return 0;
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
int unwritten;
@ -3653,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs;
struct drm_i915_gem_relocation_entry *relocs = NULL;
int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
@ -3679,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL)
if (cliprects == NULL) {
ret = -ENOMEM;
goto pre_mutex_err;
}
ret = copy_from_user(cliprects,
(struct drm_clip_rect __user *)
@ -3722,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@ -3730,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@ -3843,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
if (dev->flush_domains)
if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
(void)i915_add_request(dev, file_priv,
dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
if (obj->write_domain)
list_move_tail(&obj_priv->gpu_write_list,
&dev_priv->mm.gpu_write_list);
else
list_del_init(&obj_priv->gpu_write_list);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
@ -3926,6 +3948,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@ -3940,7 +3963,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = ret2;
}
pre_mutex_err:
drm_free_large(object_list);
kfree(cliprects);
@ -4363,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
@ -4814,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);

View file

@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
if (de_iir & DE_GSE)
ironlake_opregion_gse_intr(dev);
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip(dev, 0);
}
if (de_iir & DE_PLANEB_FLIP_DONE) {
intel_prepare_page_flip(dev, 1);
intel_finish_page_flip(dev, 1);
}
if (de_iir & DE_PIPEA_VBLANK)
drm_handle_vblank(dev, 0);
if (de_iir & DE_PIPEB_VBLANK)
drm_handle_vblank(dev, 1);
/* check event from PCH */
if ((de_iir & DE_PCH_EVENT) &&
(pch_iir & SDE_HOTPLUG_MASK)) {
@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
if (IS_IRONLAKE(dev))
return 0;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (IS_I965G(dev))
if (IS_IRONLAKE(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
if (IS_IRONLAKE(dev))
return;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
if (IS_IRONLAKE(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
dev_priv->de_irq_enable_reg = display_mask;
dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
/* should always can generate irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));

View file

@ -338,6 +338,7 @@
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
#define FBC_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c

View file

@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
/* disable HPD first */
I915_WRITE(PCH_ADPA, adpa);
(void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |

View file

@ -240,33 +240,86 @@ struct intel_limit {
#define IRONLAKE_DOT_MAX 350000
#define IRONLAKE_VCO_MIN 1760000
#define IRONLAKE_VCO_MAX 3510000
#define IRONLAKE_N_MIN 1
#define IRONLAKE_N_MAX 6
#define IRONLAKE_M_MIN 79
#define IRONLAKE_M_MAX 127
#define IRONLAKE_M1_MIN 12
#define IRONLAKE_M1_MAX 22
#define IRONLAKE_M2_MIN 5
#define IRONLAKE_M2_MAX 9
#define IRONLAKE_P_SDVO_DAC_MIN 5
#define IRONLAKE_P_SDVO_DAC_MAX 80
#define IRONLAKE_P_LVDS_MIN 28
#define IRONLAKE_P_LVDS_MAX 112
#define IRONLAKE_P1_MIN 1
#define IRONLAKE_P1_MAX 8
#define IRONLAKE_P2_SDVO_DAC_SLOW 10
#define IRONLAKE_P2_SDVO_DAC_FAST 5
#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225 MHz */
#define IRONLAKE_P_DISPLAY_PORT_MIN 10
#define IRONLAKE_P_DISPLAY_PORT_MAX 20
#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
/* We have parameter ranges for different types of output. */
/* DAC & HDMI Refclk 120Mhz */
#define IRONLAKE_DAC_N_MIN 1
#define IRONLAKE_DAC_N_MAX 5
#define IRONLAKE_DAC_M_MIN 79
#define IRONLAKE_DAC_M_MAX 127
#define IRONLAKE_DAC_P_MIN 5
#define IRONLAKE_DAC_P_MAX 80
#define IRONLAKE_DAC_P1_MIN 1
#define IRONLAKE_DAC_P1_MAX 8
#define IRONLAKE_DAC_P2_SLOW 10
#define IRONLAKE_DAC_P2_FAST 5
/* LVDS single-channel 120 MHz refclk */
#define IRONLAKE_LVDS_S_N_MIN 1
#define IRONLAKE_LVDS_S_N_MAX 3
#define IRONLAKE_LVDS_S_M_MIN 79
#define IRONLAKE_LVDS_S_M_MAX 118
#define IRONLAKE_LVDS_S_P_MIN 28
#define IRONLAKE_LVDS_S_P_MAX 112
#define IRONLAKE_LVDS_S_P1_MIN 2
#define IRONLAKE_LVDS_S_P1_MAX 8
#define IRONLAKE_LVDS_S_P2_SLOW 14
#define IRONLAKE_LVDS_S_P2_FAST 14
/* LVDS dual-channel 120 MHz refclk */
#define IRONLAKE_LVDS_D_N_MIN 1
#define IRONLAKE_LVDS_D_N_MAX 3
#define IRONLAKE_LVDS_D_M_MIN 79
#define IRONLAKE_LVDS_D_M_MAX 127
#define IRONLAKE_LVDS_D_P_MIN 14
#define IRONLAKE_LVDS_D_P_MAX 56
#define IRONLAKE_LVDS_D_P1_MIN 2
#define IRONLAKE_LVDS_D_P1_MAX 8
#define IRONLAKE_LVDS_D_P2_SLOW 7
#define IRONLAKE_LVDS_D_P2_FAST 7
/* LVDS single-channel 100 MHz refclk */
#define IRONLAKE_LVDS_S_SSC_N_MIN 1
#define IRONLAKE_LVDS_S_SSC_N_MAX 2
#define IRONLAKE_LVDS_S_SSC_M_MIN 79
#define IRONLAKE_LVDS_S_SSC_M_MAX 126
#define IRONLAKE_LVDS_S_SSC_P_MIN 28
#define IRONLAKE_LVDS_S_SSC_P_MAX 112
#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
/* LVDS dual-channel 100 MHz refclk */
#define IRONLAKE_LVDS_D_SSC_N_MIN 1
#define IRONLAKE_LVDS_D_SSC_N_MAX 3
#define IRONLAKE_LVDS_D_SSC_M_MIN 79
#define IRONLAKE_LVDS_D_SSC_M_MAX 126
#define IRONLAKE_LVDS_D_SSC_P_MIN 14
#define IRONLAKE_LVDS_D_SSC_P_MAX 42
#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
/* DisplayPort */
#define IRONLAKE_DP_N_MIN 1
#define IRONLAKE_DP_N_MAX 2
#define IRONLAKE_DP_M_MIN 81
#define IRONLAKE_DP_M_MAX 90
#define IRONLAKE_DP_P_MIN 10
#define IRONLAKE_DP_P_MAX 20
#define IRONLAKE_DP_P2_FAST 10
#define IRONLAKE_DP_P2_SLOW 10
#define IRONLAKE_DP_P2_LIMIT 0
#define IRONLAKE_DP_P1_MIN 1
#define IRONLAKE_DP_P1_MAX 2
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_sdvo = {
static const intel_limit_t intel_limits_ironlake_dac = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
.n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
.m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
.p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
.p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
.p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
.p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
.p2_slow = IRONLAKE_DAC_P2_SLOW,
.p2_fast = IRONLAKE_DAC_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_lvds = {
static const intel_limit_t intel_limits_ironlake_single_lvds = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
.n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
.m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
.n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
.m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
.p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
.p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
.p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
.p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_LVDS_SLOW,
.p2_fast = IRONLAKE_P2_LVDS_FAST },
.p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
.p2_fast = IRONLAKE_LVDS_S_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
.n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
.m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
.p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
.p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
.p2_fast = IRONLAKE_LVDS_D_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
.n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
.m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
.p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
.p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
.p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
.n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
.m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
.p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
.p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
.p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN,
.max = IRONLAKE_VCO_MAX},
.n = { .min = IRONLAKE_N_MIN,
.max = IRONLAKE_N_MAX },
.m = { .min = IRONLAKE_M_MIN,
.max = IRONLAKE_M_MAX },
.n = { .min = IRONLAKE_DP_N_MIN,
.max = IRONLAKE_DP_N_MAX },
.m = { .min = IRONLAKE_DP_M_MIN,
.max = IRONLAKE_DP_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN,
.max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN,
.max = IRONLAKE_M2_MAX },
.p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
.max = IRONLAKE_P_DISPLAY_PORT_MAX },
.p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
.max = IRONLAKE_P1_DISPLAY_PORT_MAX},
.p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
.p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
.p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
.p = { .min = IRONLAKE_DP_P_MIN,
.max = IRONLAKE_DP_P_MAX },
.p1 = { .min = IRONLAKE_DP_P1_MIN,
.max = IRONLAKE_DP_P1_MAX},
.p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
.p2_slow = IRONLAKE_DP_P2_SLOW,
.p2_fast = IRONLAKE_DP_P2_FAST },
.find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_ironlake_lvds;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
int refclk = 120;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
refclk = 100;
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
/* LVDS dual channel */
if (refclk == 100)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
limit = &intel_limits_ironlake_dual_lvds;
} else {
if (refclk == 100)
limit = &intel_limits_ironlake_single_lvds_100m;
else
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
HAS_eDP)
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_sdvo;
limit = &intel_limits_ironlake_dac;
return limit;
}
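Restating the selection above: DisplayPort and eDP get intel_limits_ironlake_display_port; LVDS picks one of the four LVDS tables by channel count (LVDS_CLKB_POWER_UP in PCH_LVDS indicating dual channel) and by refclk (100 MHz when SSC is in use with lvds_ssc_freq == 100, otherwise 120 MHz); everything else falls back to intel_limits_ironlake_dac.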
@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE)
@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
drm_vblank_off(dev, pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = roundup(sr_entries / cacheline_size, 1);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm = 1;
srwm &= 0x3f;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
} else {
/* Turn off self refresh if both pipes are enabled */
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
struct drm_gem_object *obj;
struct drm_gem_object *old_fb_obj;
struct drm_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
};
@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->obj);
drm_gem_object_unreference(work->obj);
i915_gem_object_unpin(work->old_fb_obj);
drm_gem_object_unreference(work->pending_flip_obj);
drm_gem_object_unreference(work->old_fb_obj);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
if (work && !work->pending) {
obj_priv = work->pending_flip_obj->driver_private;
DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
obj_priv,
atomic_read(&obj_priv->pending_flip));
}
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
obj_priv = work->obj->driver_private;
if (atomic_dec_and_test(&obj_priv->pending_flip))
obj_priv = work->pending_flip_obj->driver_private;
/* Initial scanout buffer will have a 0 pending flip count */
if ((atomic_read(&obj_priv->pending_flip) == 0) ||
atomic_dec_and_test(&obj_priv->pending_flip))
DRM_WAKEUP(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
}
@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work)
if (intel_crtc->unpin_work) {
intel_crtc->unpin_work->pending = 1;
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
int ret;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc;
RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL);
@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
work->obj = intel_fb->obj;
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
mutex_unlock(&dev->struct_mutex);
@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
obj->driver_private);
kfree(work);
intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
/* Reference the old fb object for the scheduled work. */
drm_gem_object_reference(work->obj);
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(work->old_fb_obj);
drm_gem_object_reference(obj);
crtc->fb = fb;
i915_gem_object_flush_write_domain(obj);
drm_vblank_get(dev, intel_crtc->pipe);
obj_priv = obj->driver_private;
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
BEGIN_LP_RING(4);
OUT_RING(MI_DISPLAY_FLIP |
@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch);
if (IS_I965G(dev)) {
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
OUT_RING((fb->width << 16) | fb->height);
pipesrc = I915_READ(pipesrc_reg);
OUT_RING(pipesrc & 0x0fff0fff);
} else {
OUT_RING(obj_priv->gtt_offset);
OUT_RING(MI_NOOP);

View file

@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_pin(fbo, PAGE_SIZE);
ret = i915_gem_object_pin(fbo, 64*1024);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;

View file

@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
{
.ident = "Samsung SX20S",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
},
},
@ -622,6 +622,13 @@ static const struct dmi_system_id bad_lid_status[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
},
},
{
.ident = "Aspire 1810T",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
},
},
{
.ident = "PC-81005",
.matches = {
@ -629,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
},
},
{
.ident = "Clevo M5x0N",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
},
},
{ }
};
@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
{
enum drm_connector_status status = connector_status_connected;
if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
status = connector_status_disconnected;
return status;

View file

@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
} else if (flags & SDVO_OUTPUT_CVBS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
sdvo_priv->is_tv = true;
intel_output->needs_tv_clock = true;
intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;

View file

@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
{
int result;
if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
&result))
return -ENODEV;
NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
if (result & 0x1) { /* Stamina mode - disable the external GPU */
if (result) { /* Ensure that the external GPU is enabled */
nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
NULL);
} else { /* Stamina mode - disable the external GPU */
nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
NULL);
nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
NULL);
} else { /* Ensure that the external GPU is enabled */
nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
NULL);
}
return 0;

View file

@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
if (dev_priv->card_type >= NV_50)
if (dev_priv->card_type >= NV_40)
return 1;
/*
@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct init_exec iexec = {true, false};
struct nvbios *bios = &dev_priv->VBIOS;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
}
bios->display.output = dcbent;
if (pxclk == 0) {
script = ROM16(otable[6]);
if (!script) {
@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
parse_init_table(bios, script, &iexec);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
script = ROM16(otable[8]);
@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
parse_init_table(bios, script, &iexec);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
if (table[4] >= 12)
@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
parse_init_table(bios, script, &iexec);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
script = ROM16(otable[table[4] + i*6 + 2]);
@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
parse_init_table(bios, script, &iexec);
nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
script = ROM16(otable[table[4] + i*6 + 4]);
@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
parse_init_table(bios, script, &iexec);
nouveau_bios_run_init_table(dev, script, dcbent);
}
return 0;
@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->VBIOS;
struct init_exec iexec = { true, false };
mutex_lock(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
mutex_unlock(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->VBIOS;
memset(bios, 0, sizeof(struct nvbios));
mutex_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))

View file

@ -205,6 +205,8 @@ struct nvbios {
struct drm_device *dev;
struct nouveau_bios_info pub;
struct mutex lock;
uint8_t data[NV_PROM_SIZE];
unsigned int length;
bool execute;

Some files were not shown because too many files have changed in this diff.