Merge branch 'merge' into next

Benjamin Herrenschmidt 2009-05-29 13:54:52 +10:00
commit 435462c6e6
269 changed files with 11366 additions and 1208 deletions

View file

@ -133,4 +133,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root.
Author:
Christoph Rohland <cr@sap.com>, 1.12.01
Updated:
Hugh Dickins <hugh@veritas.com>, 4 June 2007
Hugh Dickins, 4 June 2007

View file

@ -1542,6 +1542,10 @@ and is between 256 and 4096 characters. It is defined in the file
register save and restore. The kernel will only save
legacy floating-point registers on task switch.
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fall back to
enabling legacy floating-point and SSE state.
nohlt [BUGS=ARM,SH] Tells the kernel that the sleep(SH) or
wfi(ARM) instruction doesn't work correctly and not to
use it. This is also useful when using a JTAG debugger.

View file

@ -1266,13 +1266,22 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
sctp_wmem - vector of 3 INTEGERs: min, default, max
See tcp_wmem for a description.
UNDOCUMENTED:
/proc/sys/net/core/*
dev_weight FIXME
dev_weight - INTEGER
The maximum number of packets that the kernel can handle on a
NAPI interrupt; it is a per-CPU variable.
Default: 64
/proc/sys/net/unix/*
max_dgram_qlen FIXME
max_dgram_qlen - INTEGER
The maximum length of the dgram socket receive queue
Default: 10
UNDOCUMENTED:
/proc/sys/net/irda/*
fast_poll_increase FIXME
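
As a quick illustration of the two newly documented knobs, here is a minimal user-space sketch; it assumes only the /proc paths listed above, and must run as root to change values:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/net/core/dev_weight", "r");
        int weight = 0;

        if (f) {
                fscanf(f, "%d", &weight);       /* documented default: 64 */
                fclose(f);
        }
        printf("NAPI budget per interrupt: %d packets\n", weight);

        f = fopen("/proc/sys/net/unix/max_dgram_qlen", "w");
        if (f) {
                fprintf(f, "%d\n", 20);         /* raise the default of 10 */
                fclose(f);
        }
        return 0;
}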

View file

@ -334,6 +334,7 @@ STAC9227/9228/9229/927x
ref-no-jd Reference board without HP/Mic jack detection
3stack D965 3stack
5stack D965 5stack + SPDIF
5stack-no-fp D965 5stack without front panel
dell-3stack Dell Dimension E520
dell-bios Fixes with Dell BIOS setup
auto BIOS setup (default)

View file

@ -1132,17 +1132,17 @@ F: fs/bfs/
F: include/linux/bfs_fs.h
BLACKFIN ARCHITECTURE
P: Bryan Wu
M: cooloney@kernel.org
P: Mike Frysinger
M: vapier@gentoo.org
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
F: arch/blackfin/
BLACKFIN EMAC DRIVER
P: Bryan Wu
M: cooloney@kernel.org
L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
P: Michael Hennerich
M: michael.hennerich@analog.com
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
F: drivers/net/bfin_mac.*
@ -1150,7 +1150,7 @@ F: drivers/net/bfin_mac.*
BLACKFIN RTC DRIVER
P: Mike Frysinger
M: vapier.adi@gmail.com
L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
F: drivers/rtc/rtc-bfin.c
@ -1158,7 +1158,7 @@ F: drivers/rtc/rtc-bfin.c
BLACKFIN SERIAL DRIVER
P: Sonic Zhang
M: sonic.zhang@analog.com
L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
F: drivers/serial/bfin_5xx.c
@ -1166,7 +1166,7 @@ F: drivers/serial/bfin_5xx.c
BLACKFIN WATCHDOG DRIVER
P: Mike Frysinger
M: vapier.adi@gmail.com
L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org
S: Supported
F: drivers/watchdog/bfin_wdt.c
@ -1174,7 +1174,7 @@ F: drivers/watchdog/bfin_wdt.c
BLACKFIN I2C TWI DRIVER
P: Sonic Zhang
M: sonic.zhang@analog.com
L: uclinux-dist-devel@blackfin.uclinux.org (subscribers-only)
L: uclinux-dist-devel@blackfin.uclinux.org
W: http://blackfin.uclinux.org/
S: Supported
F: drivers/i2c/busses/i2c-bfin-twi.c
@ -1431,6 +1431,14 @@ P: Russell King
M: linux@arm.linux.org.uk
F: include/linux/clk.h
CISCO FCOE HBA DRIVER
P: Abhijeet Joglekar
M: abjoglek@cisco.com
P: Joe Eykholt
M: jeykholt@cisco.com
L: linux-scsi@vger.kernel.org
S: Supported
CODA FILE SYSTEM
P: Jan Harkes
M: jaharkes@cs.cmu.edu
@ -5579,6 +5587,14 @@ M: ian@mnementh.co.uk
S: Maintained
F: drivers/mmc/host/tmio_mmc.*
TMPFS (SHMEM FILESYSTEM)
P: Hugh Dickins
M: hugh.dickins@tiscali.co.uk
L: linux-mm@kvack.org
S: Maintained
F: include/linux/shmem_fs.h
F: mm/shmem.c
TPM DEVICE DRIVER
P: Debora Velarde
M: debora@linux.vnet.ibm.com

View file

@ -1,8 +1,8 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 30
EXTRAVERSION = -rc6
NAME = Vindictive Armadillo
EXTRAVERSION = -rc7
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

View file

@ -273,6 +273,7 @@ config ARCH_EP93XX
select HAVE_CLK
select COMMON_CLKDEV
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_HOLES_MEMORYMODEL
help
This enables support for the Cirrus EP93xx series of CPUs.
@ -976,10 +977,9 @@ config OABI_COMPAT
UNPREDICTABLE (in fact it can be predicted that it won't work
at all). If in doubt say Y.
config ARCH_FLATMEM_HAS_HOLES
config ARCH_HAS_HOLES_MEMORYMODEL
bool
default y
depends on FLATMEM
default n
# Discontigmem is deprecated
config ARCH_DISCONTIGMEM_ENABLE

View file

@ -253,9 +253,9 @@ void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
}
#ifdef CONFIG_SMP
void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
unsigned long map = *cpus_addr(cpumask);
unsigned long map = *cpus_addr(*mask);
/* this always happens on GIC0 */
writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
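
A rough worked example of the word written to GIC_DIST_SOFTINT by the line above; the layout is inferred only from this expression (target-CPU bitmap shifted into bits 16 and up, interrupt number in the low bits):

#include <stdio.h>

int main(void)
{
        unsigned long map = (1UL << 0) | (1UL << 2);    /* target CPUs 0 and 2 */
        unsigned int irq = 1;                           /* the IPI number used here */

        printf("SOFTINT word = 0x%08lx\n", map << 16 | irq);    /* 0x00050001 */
        return 0;
}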

View file

@ -36,7 +36,7 @@
void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(cpumask_t cpumask, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
#endif
#endif

View file

@ -53,17 +53,12 @@ extern void smp_store_cpu_info(unsigned int cpuid);
/*
* Raise an IPI cross call on CPUs in callmap.
*/
extern void smp_cross_call(cpumask_t callmap);
/*
* Broadcast a timer interrupt to the other CPUs.
*/
extern void smp_send_timer(void);
extern void smp_cross_call(const struct cpumask *mask);
/*
* Broadcast a clock event to other CPUs.
*/
extern void smp_timer_broadcast(cpumask_t mask);
extern void smp_timer_broadcast(const struct cpumask *mask);
/*
* Boot a secondary CPU, and assign it the specified idle task.
@ -102,7 +97,8 @@ extern int platform_cpu_kill(unsigned int cpu);
extern void platform_cpu_enable(unsigned int cpu);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi(cpumask_t mask);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask
/*
* Local timer interrupt handling function (can be IPI'ed).

View file

@ -326,14 +326,14 @@ void __init smp_prepare_boot_cpu(void)
per_cpu(cpu_data, cpu).idle = current;
}
static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
unsigned long flags;
unsigned int cpu;
local_irq_save(flags);
for_each_cpu_mask(cpu, callmap) {
for_each_cpu(cpu, mask) {
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
spin_lock(&ipi->lock);
@ -344,19 +344,19 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
/*
* Call the platform specific cross-CPU call function.
*/
smp_cross_call(callmap);
smp_cross_call(mask);
local_irq_restore(flags);
}
void arch_send_call_function_ipi(cpumask_t mask)
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
void show_ipi_list(struct seq_file *p)
@ -498,17 +498,10 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
void smp_send_reschedule(int cpu)
{
send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
void smp_send_timer(void)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
send_ipi_message(mask, IPI_TIMER);
}
void smp_timer_broadcast(cpumask_t mask)
void smp_timer_broadcast(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_TIMER);
}
@ -517,7 +510,7 @@ void smp_send_stop(void)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
send_ipi_message(mask, IPI_CPU_STOP);
send_ipi_message(&mask, IPI_CPU_STOP);
}
/*
@ -528,20 +521,17 @@ int setup_profiling_timer(unsigned int multiplier)
return -EINVAL;
}
static int
on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
const struct cpumask *mask)
{
int ret = 0;
preempt_disable();
ret = smp_call_function_mask(mask, func, info, wait);
if (cpu_isset(smp_processor_id(), mask))
smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(smp_processor_id(), mask))
func(info);
preempt_enable();
return ret;
}
/**********************************************************************/
@ -602,20 +592,17 @@ void flush_tlb_all(void)
void flush_tlb_mm(struct mm_struct *mm)
{
cpumask_t mask = mm->cpu_vm_mask;
on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
cpumask_t mask = vma->vm_mm->cpu_vm_mask;
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = uaddr;
on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
}
void flush_tlb_kernel_page(unsigned long kaddr)
@ -630,14 +617,13 @@ void flush_tlb_kernel_page(unsigned long kaddr)
void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
cpumask_t mask = vma->vm_mm->cpu_vm_mask;
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;
on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)

View file

@ -21,15 +21,50 @@
#include <asm/div64.h>
#include <mach/hardware.h>
/*
* The EP93xx has two external crystal oscillators. To generate the
* required high-frequency clocks, the processor uses two phase-locked-
* loops (PLLs) to multiply the incoming external clock signal to much
* higher frequencies that are then divided down by programmable dividers
* to produce the needed clocks. The PLLs operate independently of one
* another.
*/
#define EP93XX_EXT_CLK_RATE 14745600
#define EP93XX_EXT_RTC_RATE 32768
struct clk {
unsigned long rate;
int users;
int sw_locked;
u32 enable_reg;
u32 enable_mask;
unsigned long (*get_rate)(struct clk *clk);
};
static struct clk clk_uart = {
.rate = 14745600,
static unsigned long get_uart_rate(struct clk *clk);
static struct clk clk_uart1 = {
.sw_locked = 1,
.enable_reg = EP93XX_SYSCON_DEVICE_CONFIG,
.enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U1EN,
.get_rate = get_uart_rate,
};
static struct clk clk_uart2 = {
.sw_locked = 1,
.enable_reg = EP93XX_SYSCON_DEVICE_CONFIG,
.enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U2EN,
.get_rate = get_uart_rate,
};
static struct clk clk_uart3 = {
.sw_locked = 1,
.enable_reg = EP93XX_SYSCON_DEVICE_CONFIG,
.enable_mask = EP93XX_SYSCON_DEVICE_CONFIG_U3EN,
.get_rate = get_uart_rate,
};
static struct clk clk_pll1;
static struct clk clk_f;
@ -95,9 +130,9 @@ static struct clk clk_m2m1 = {
{ .dev_id = dev, .con_id = con, .clk = ck }
static struct clk_lookup clocks[] = {
INIT_CK("apb:uart1", NULL, &clk_uart),
INIT_CK("apb:uart2", NULL, &clk_uart),
INIT_CK("apb:uart3", NULL, &clk_uart),
INIT_CK("apb:uart1", NULL, &clk_uart1),
INIT_CK("apb:uart2", NULL, &clk_uart2),
INIT_CK("apb:uart3", NULL, &clk_uart3),
INIT_CK(NULL, "pll1", &clk_pll1),
INIT_CK(NULL, "fclk", &clk_f),
INIT_CK(NULL, "hclk", &clk_h),
@ -125,6 +160,8 @@ int clk_enable(struct clk *clk)
u32 value;
value = __raw_readl(clk->enable_reg);
if (clk->sw_locked)
__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
__raw_writel(value | clk->enable_mask, clk->enable_reg);
}
@ -138,13 +175,29 @@ void clk_disable(struct clk *clk)
u32 value;
value = __raw_readl(clk->enable_reg);
if (clk->sw_locked)
__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
__raw_writel(value & ~clk->enable_mask, clk->enable_reg);
}
}
EXPORT_SYMBOL(clk_disable);
static unsigned long get_uart_rate(struct clk *clk)
{
u32 value;
value = __raw_readl(EP93XX_SYSCON_CLOCK_CONTROL);
if (value & EP93XX_SYSCON_CLOCK_UARTBAUD)
return EP93XX_EXT_CLK_RATE;
else
return EP93XX_EXT_CLK_RATE / 2;
}
unsigned long clk_get_rate(struct clk *clk)
{
if (clk->get_rate)
return clk->get_rate(clk);
return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
@ -162,7 +215,7 @@ static unsigned long calc_pll_rate(u32 config_word)
unsigned long long rate;
int i;
rate = 14745600;
rate = EP93XX_EXT_CLK_RATE;
rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */
rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */
do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */
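
A small worked example of the PLL formula above, with made-up field values (X1FBD=1, X2FBD=24, X2IPD=1) plugged into the same expression:

#include <stdio.h>

int main(void)
{
        unsigned long long rate = 14745600;                     /* EP93XX_EXT_CLK_RATE */
        unsigned long config_word = (1 << 11) | (24 << 5) | 1;

        rate *= ((config_word >> 11) & 0x1f) + 1;               /* X1FBD + 1 = 2  */
        rate *= ((config_word >> 5) & 0x3f) + 1;                /* X2FBD + 1 = 25 */
        rate /= (config_word & 0x1f) + 1;                       /* X2IPD + 1 = 2  */

        printf("PLL rate = %llu Hz\n", rate);                   /* 368640000 */
        return 0;
}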
@ -195,7 +248,7 @@ static int __init ep93xx_clock_init(void)
value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1);
if (!(value & 0x00800000)) { /* PLL1 bypassed? */
clk_pll1.rate = 14745600;
clk_pll1.rate = EP93XX_EXT_CLK_RATE;
} else {
clk_pll1.rate = calc_pll_rate(value);
}
@ -206,7 +259,7 @@ static int __init ep93xx_clock_init(void)
value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2);
if (!(value & 0x00080000)) { /* PLL2 bypassed? */
clk_pll2.rate = 14745600;
clk_pll2.rate = EP93XX_EXT_CLK_RATE;
} else if (value & 0x00040000) { /* PLL2 enabled? */
clk_pll2.rate = calc_pll_rate(value);
} else {

View file

@ -159,7 +159,10 @@
#define EP93XX_SYSCON_CLOCK_SET1 EP93XX_SYSCON_REG(0x20)
#define EP93XX_SYSCON_CLOCK_SET2 EP93XX_SYSCON_REG(0x24)
#define EP93XX_SYSCON_DEVICE_CONFIG EP93XX_SYSCON_REG(0x80)
#define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE 0x00800000
#define EP93XX_SYSCON_DEVICE_CONFIG_U3EN (1<<24)
#define EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE (1<<23)
#define EP93XX_SYSCON_DEVICE_CONFIG_U2EN (1<<20)
#define EP93XX_SYSCON_DEVICE_CONFIG_U1EN (1<<18)
#define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0)
#define EP93XX_WATCHDOG_BASE (EP93XX_APB_VIRT_BASE + 0x00140000)

View file

@ -121,7 +121,7 @@ static struct clk uartclk = {
.rate = 14745600,
};
static struct clk_lookup lookups[] __initdata = {
static struct clk_lookup lookups[] = {
{ /* UART0 */
.dev_id = "mb:16",
.clk = &uartclk,

View file

@ -18,7 +18,7 @@
/* IO_START and IO_BASE are defined in hardware.h */
#define SYS_CLOCK_START (IO_START + SYS_CLCOK_OFF) /* Physical address */
#define SYS_CLOCK_START (IO_START + SYS_CLOCK_OFF) /* Physical address */
#define SYS_CLOCK_BASE (IO_BASE + SYS_CLOCK_OFF) /* Virtual address */
/* Define the interface to the SYS_CLOCK */

View file

@ -103,10 +103,10 @@ static struct omap_clk omap24xx_clks[] = {
CLK(NULL, "mdm_ick", &mdm_ick, CK_243X),
CLK(NULL, "mdm_osc_ck", &mdm_osc_ck, CK_243X),
/* DSS domain clocks */
CLK(NULL, "dss_ick", &dss_ick, CK_243X | CK_242X),
CLK(NULL, "dss1_fck", &dss1_fck, CK_243X | CK_242X),
CLK(NULL, "dss2_fck", &dss2_fck, CK_243X | CK_242X),
CLK(NULL, "dss_54m_fck", &dss_54m_fck, CK_243X | CK_242X),
CLK("omapfb", "ick", &dss_ick, CK_243X | CK_242X),
CLK("omapfb", "dss1_fck", &dss1_fck, CK_243X | CK_242X),
CLK("omapfb", "dss2_fck", &dss2_fck, CK_243X | CK_242X),
CLK("omapfb", "tv_fck", &dss_54m_fck, CK_243X | CK_242X),
/* L3 domain clocks */
CLK(NULL, "core_l3_ck", &core_l3_ck, CK_243X | CK_242X),
CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_243X | CK_242X),
@ -206,7 +206,7 @@ static struct omap_clk omap24xx_clks[] = {
CLK(NULL, "aes_ick", &aes_ick, CK_243X | CK_242X),
CLK(NULL, "pka_ick", &pka_ick, CK_243X | CK_242X),
CLK(NULL, "usb_fck", &usb_fck, CK_243X | CK_242X),
CLK(NULL, "usbhs_ick", &usbhs_ick, CK_243X),
CLK("musb_hdrc", "ick", &usbhs_ick, CK_243X),
CLK("mmci-omap-hs.0", "ick", &mmchs1_ick, CK_243X),
CLK("mmci-omap-hs.0", "fck", &mmchs1_fck, CK_243X),
CLK("mmci-omap-hs.1", "ick", &mmchs2_ick, CK_243X),

View file

@ -157,7 +157,7 @@ static struct omap_clk omap34xx_clks[] = {
CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X),
CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X),
CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X),
CLK(NULL, "hsotgusb_ick", &hsotgusb_ick, CK_343X),
CLK("musb_hdrc", "ick", &hsotgusb_ick, CK_343X),
CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X),
CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X),
CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X),
@ -197,11 +197,11 @@ static struct omap_clk omap34xx_clks[] = {
CLK("omap_rng", "ick", &rng_ick, CK_343X),
CLK(NULL, "sha11_ick", &sha11_ick, CK_343X),
CLK(NULL, "des1_ick", &des1_ick, CK_343X),
CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck, CK_343X),
CLK(NULL, "dss_tv_fck", &dss_tv_fck, CK_343X),
CLK(NULL, "dss_96m_fck", &dss_96m_fck, CK_343X),
CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck, CK_343X),
CLK(NULL, "dss_ick", &dss_ick, CK_343X),
CLK("omapfb", "dss1_fck", &dss1_alwon_fck, CK_343X),
CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X),
CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X),
CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X),
CLK("omapfb", "ick", &dss_ick, CK_343X),
CLK(NULL, "cam_mclk", &cam_mclk, CK_343X),
CLK(NULL, "cam_ick", &cam_ick, CK_343X),
CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),

View file

@ -2182,7 +2182,7 @@ static struct clk wkup_32k_fck = {
static struct clk gpio1_dbck = {
.name = "gpio1_dbck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &wkup_32k_fck,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
.enable_bit = OMAP3430_EN_GPIO1_SHIFT,
@ -2427,7 +2427,7 @@ static struct clk per_32k_alwon_fck = {
static struct clk gpio6_dbck = {
.name = "gpio6_dbck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &per_32k_alwon_fck,
.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
.enable_bit = OMAP3430_EN_GPIO6_SHIFT,
@ -2437,7 +2437,7 @@ static struct clk gpio6_dbck = {
static struct clk gpio5_dbck = {
.name = "gpio5_dbck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &per_32k_alwon_fck,
.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
.enable_bit = OMAP3430_EN_GPIO5_SHIFT,
@ -2447,7 +2447,7 @@ static struct clk gpio5_dbck = {
static struct clk gpio4_dbck = {
.name = "gpio4_dbck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &per_32k_alwon_fck,
.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
.enable_bit = OMAP3430_EN_GPIO4_SHIFT,
@ -2457,7 +2457,7 @@ static struct clk gpio4_dbck = {
static struct clk gpio3_dbck = {
.name = "gpio3_dbck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &per_32k_alwon_fck,
.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
.enable_bit = OMAP3430_EN_GPIO3_SHIFT,
@ -2467,7 +2467,7 @@ static struct clk gpio3_dbck = {
static struct clk gpio2_dbck = {
.name = "gpio2_dbck",
.ops = &clkops_omap2_dflt_wait,
.ops = &clkops_omap2_dflt,
.parent = &per_32k_alwon_fck,
.enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
.enable_bit = OMAP3430_EN_GPIO2_SHIFT,

View file

@ -354,10 +354,12 @@ static void omap_init_mcspi(void)
platform_device_register(&omap2_mcspi1);
platform_device_register(&omap2_mcspi2);
#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
platform_device_register(&omap2_mcspi3);
if (cpu_is_omap2430() || cpu_is_omap343x())
platform_device_register(&omap2_mcspi3);
#endif
#ifdef CONFIG_ARCH_OMAP3
platform_device_register(&omap2_mcspi4);
if (cpu_is_omap343x())
platform_device_register(&omap2_mcspi4);
#endif
}

View file

@ -409,7 +409,7 @@
/* PM_PREPWSTST_CAM specific bits */
/* PM_PWSTCTRL_USBHOST specific bits */
#define OMAP3430ES2_SAVEANDRESTORE_SHIFT (1 << 4)
#define OMAP3430ES2_SAVEANDRESTORE_SHIFT 4
/* RM_RSTST_PER specific bits */

View file

@ -187,7 +187,7 @@ int tusb6010_platform_retime(unsigned is_refclk)
unsigned sysclk_ps;
int status;
if (!refclk_psec || sysclk_ps == 0)
if (!refclk_psec || fclk_ps == 0)
return -ENODEV;
sysclk_ps = is_refclk ? refclk_psec : TUSB6010_OSCCLK_60;

View file

@ -46,6 +46,7 @@
#include <mach/audio.h>
#include <mach/pxafb.h>
#include <mach/i2c.h>
#include <mach/regs-uart.h>
#include <mach/viper.h>
#include <asm/setup.h>

View file

@ -750,14 +750,6 @@ void __init realview_timer_init(unsigned int timer_irq)
{
u32 val;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/*
* The dummy clock device has to be registered before the main device
* so that the latter will broadcast the clock events
*/
local_timer_setup();
#endif
/*
* set clock frequency:
* REALVIEW_REFCLK is 32KHz

View file

@ -15,16 +15,9 @@
/*
* We use IRQ1 as the IPI
*/
static inline void smp_cross_call(cpumask_t callmap)
{
gic_raise_softirq(callmap, 1);
}
/*
* Do nothing on MPcore.
*/
static inline void smp_cross_call_done(cpumask_t callmap)
static inline void smp_cross_call(const struct cpumask *mask)
{
gic_raise_softirq(mask, 1);
}
#endif

View file

@ -189,8 +189,10 @@ void __cpuinit local_timer_setup(void)
struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
clk->name = "dummy_timer";
clk->features = CLOCK_EVT_FEAT_DUMMY;
clk->rating = 200;
clk->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY;
clk->rating = 400;
clk->mult = 1;
clk->set_mode = dummy_timer_set_mode;
clk->broadcast = smp_timer_broadcast;

View file

@ -77,13 +77,6 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
{
trace_hardirqs_off();
/*
* the primary core may have used a "cross call" soft interrupt
* to get this processor out of WFI in the BootMonitor - make
* sure that we are no longer being sent this soft interrupt
*/
smp_cross_call_done(cpumask_of_cpu(cpu));
/*
* if any interrupts are already enabled for the primary
* core (e.g. timer irq), then they will not have been enabled
@ -136,7 +129,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* Use smp_cross_call() for this, since there's little
* point duplicating the code here
*/
smp_cross_call(cpumask_of_cpu(cpu));
smp_cross_call(cpumask_of(cpu));
timeout = jiffies + (1 * HZ);
while (time_before(jiffies, timeout)) {
@ -224,11 +217,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (max_cpus > ncores)
max_cpus = ncores;
#ifdef CONFIG_LOCAL_TIMERS
#if defined(CONFIG_LOCAL_TIMERS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
/*
* Enable the local timer for primary CPU. If the device is
* dummy (!CONFIG_LOCAL_TIMERS), it was already registers in
* realview_timer_init
* Enable the local timer or broadcast device for the boot CPU.
*/
local_timer_setup();
#endif

View file

@ -588,8 +588,6 @@ static void __init bast_map_io(void)
s3c_device_nand.dev.platform_data = &bast_nand_info;
s3c_i2c0_set_platdata(&bast_i2c_info);
s3c24xx_init_io(bast_iodesc, ARRAY_SIZE(bast_iodesc));
s3c24xx_init_clocks(0);
s3c24xx_init_uarts(bast_uartcfgs, ARRAY_SIZE(bast_uartcfgs));
@ -602,6 +600,7 @@ static void __init bast_init(void)
sysdev_class_register(&bast_pm_sysclass);
sysdev_register(&bast_pm_sysdev);
s3c_i2c0_set_platdata(&bast_i2c_info);
s3c24xx_fb_set_platdata(&bast_fb_info);
platform_add_devices(bast_devices, ARRAY_SIZE(bast_devices));

View file

@ -413,7 +413,7 @@ static struct clk ref24_clk = {
.rate = 24000000,
};
static struct clk_lookup lookups[] __initdata = {
static struct clk_lookup lookups[] = {
{ /* UART0 */
.dev_id = "dev:f1",
.clk = &ref24_clk,

View file

@ -114,4 +114,8 @@ extern unsigned int SingleCPDO(struct roundingData *roundData,
extern unsigned int DoubleCPDO(struct roundingData *roundData,
const unsigned int opcode, FPREG * rFd);
/* extended_cpdo.c */
extern unsigned int ExtendedCPDO(struct roundingData *roundData,
const unsigned int opcode, FPREG * rFd);
#endif

View file

@ -27,10 +27,6 @@
#include "fpmodule.inl"
#include "softfloat.h"
#ifdef CONFIG_FPE_NWFPE_XP
extern flag floatx80_is_nan(floatx80);
#endif
unsigned int PerformFLT(const unsigned int opcode);
unsigned int PerformFIX(const unsigned int opcode);

View file

@ -226,6 +226,8 @@ char floatx80_le_quiet( floatx80, floatx80 );
char floatx80_lt_quiet( floatx80, floatx80 );
char floatx80_is_signaling_nan( floatx80 );
extern flag floatx80_is_nan(floatx80);
#endif
static inline flag extractFloat32Sign(float32 a)

View file

@ -206,9 +206,10 @@ void __init omapfb_reserve_sdram(void)
config_invalid = 1;
return;
}
if (rg.paddr)
if (rg.paddr) {
reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT);
reserved += rg.size;
reserved += rg.size;
}
omapfb_config.mem_desc.region[i] = rg;
configured_regions++;
}

View file

@ -307,7 +307,7 @@ static inline int gpio_valid(int gpio)
return 0;
if (cpu_is_omap24xx() && gpio < 128)
return 0;
if (cpu_is_omap34xx() && gpio < 160)
if (cpu_is_omap34xx() && gpio < 192)
return 0;
return -1;
}

View file

@ -306,8 +306,6 @@ struct clk s3c24xx_uclk = {
int s3c24xx_register_clock(struct clk *clk)
{
clk->owner = THIS_MODULE;
if (clk->enable == NULL)
clk->enable = clk_null_enable;

View file

@ -1235,7 +1235,7 @@ int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *d
EXPORT_SYMBOL(s3c2410_dma_getposition);
static struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev)
static inline struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev)
{
return container_of(dev, struct s3c2410_dma_chan, dev);
}

View file

@ -57,7 +57,7 @@
#if 1
#define gpio_dbg(x...) do { } while(0)
#else
#define gpio_dbg(x...) printk(KERN_DEBUG ## x)
#define gpio_dbg(x...) printk(KERN_DEBUG x)
#endif
/* The s3c64xx_gpiolib_4bit routines are to control the gpio banks where

View file

@ -61,14 +61,14 @@
#define S3C64XX_GPH7_ADDR_CF1 (0x06 << 28)
#define S3C64XX_GPH7_EINT_G6_7 (0x07 << 28)
#define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 32)
#define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 32)
#define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 32)
#define S3C64XX_GPH8_ADDR_CF2 (0x06 << 32)
#define S3C64XX_GPH8_EINT_G6_8 (0x07 << 32)
#define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 36)
#define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 36)
#define S3C64XX_GPH9_I2S_V40_DI (0x05 << 36)
#define S3C64XX_GPH9_EINT_G6_9 (0x07 << 36)
#define S3C64XX_GPH8_MMC1_DATA6 (0x02 << 0)
#define S3C64XX_GPH8_MMC2_DATA2 (0x03 << 0)
#define S3C64XX_GPH8_I2S_V40_LRCLK (0x05 << 0)
#define S3C64XX_GPH8_ADDR_CF2 (0x06 << 0)
#define S3C64XX_GPH8_EINT_G6_8 (0x07 << 0)
#define S3C64XX_GPH9_OUTPUT (0x01 << 4)
#define S3C64XX_GPH9_MMC1_DATA7 (0x02 << 4)
#define S3C64XX_GPH9_MMC2_DATA3 (0x03 << 4)
#define S3C64XX_GPH9_I2S_V40_DI (0x05 << 4)
#define S3C64XX_GPH9_EINT_G6_9 (0x07 << 4)

View file

@ -1 +0,0 @@
+mach

View file

@ -378,8 +378,10 @@
#define __NR_dup3 363
#define __NR_pipe2 364
#define __NR_inotify_init1 365
#define __NR_preadv 366
#define __NR_pwritev 367
#define __NR_syscall 366
#define __NR_syscall 368
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */

arch/blackfin/kernel/.gitignore (new file)
View file

@ -0,0 +1 @@
vmlinux.lds

View file

@ -8,9 +8,8 @@
#define strncmp __inline_strncmp
#include <asm/string.h>
#undef strncmp
#include <linux/module.h>
#undef strncmp
int strncmp(const char *cs, const char *ct, size_t count)
{

View file

@ -1581,6 +1581,8 @@ ENTRY(_sys_call_table)
.long _sys_dup3
.long _sys_pipe2
.long _sys_inotify_init1 /* 365 */
.long _sys_preadv
.long _sys_pwritev
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
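
For context, a user-space sketch of what the two new table entries expose; this assumes only the generic preadv() semantics, nothing Blackfin-specific beyond the numbers 366/367 wired up above:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <sys/uio.h>

int main(void)
{
        char a[8], b[8];
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };
        int fd = open("/etc/hostname", O_RDONLY);       /* any readable file */

        /* Scatter-read 16 bytes from offset 0 without moving the file
         * position -- the service sys_preadv now provides. */
        ssize_t n = preadv(fd, iov, 2, 0);

        printf("read %zd bytes\n", n);
        return 0;
}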

View file

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.29
# Tue Mar 24 10:23:20 2009
# Linux kernel version: 2.6.30-rc5
# Mon May 11 09:01:02 2009
#
CONFIG_MICROBLAZE=y
# CONFIG_SWAP is not set
@ -32,6 +32,7 @@ CONFIG_LOCALVERSION_AUTO=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_POSIX_MQUEUE_SYSCTL=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
# CONFIG_TASKSTATS is not set
@ -63,6 +64,7 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_HOTPLUG is not set
CONFIG_PRINTK=y
CONFIG_BUG=y
@ -80,6 +82,8 @@ CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
# CONFIG_MARKERS is not set
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@ -92,7 +96,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
# CONFIG_LBD is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
@ -166,6 +169,8 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
#
# Executable file formats
@ -180,7 +185,6 @@ CONFIG_NET=y
#
# Networking options
#
CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
@ -232,6 +236,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@ -244,7 +249,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_PHONET is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
CONFIG_WIRELESS_OLD_REGULATORY=y
@ -379,6 +383,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ATA is not set
# CONFIG_MD is not set
CONFIG_NETDEVICES=y
CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@ -388,6 +393,7 @@ CONFIG_NETDEVICES=y
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
# CONFIG_ETHOC is not set
# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
@ -405,7 +411,6 @@ CONFIG_NETDEV_10000=y
#
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
# CONFIG_IWLWIFI_LEDS is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@ -455,6 +460,7 @@ CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_TIMERIOMEM is not set
# CONFIG_RTC is not set
# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
@ -525,7 +531,7 @@ CONFIG_USB_SUPPORT=y
#
#
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
# CONFIG_USB_GADGET is not set
@ -538,6 +544,7 @@ CONFIG_USB_SUPPORT=y
# CONFIG_ACCESSIBILITY is not set
# CONFIG_RTC_CLASS is not set
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@ -562,6 +569,11 @@ CONFIG_FILE_LOCKING=y
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
#
# Caches
#
# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
#
@ -601,8 +613,13 @@ CONFIG_CRAMFS=y
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_BLOCK=y
# CONFIG_ROMFS_BACKED_BY_MTD is not set
# CONFIG_ROMFS_BACKED_BY_BOTH is not set
CONFIG_ROMFS_ON_BLOCK=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@ -614,7 +631,6 @@ CONFIG_LOCKD_V4=y
CONFIG_NFS_ACL_SUPPORT=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
# CONFIG_SUNRPC_REGISTER_V4 is not set
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
@ -647,6 +663,9 @@ CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
@ -678,15 +697,8 @@ CONFIG_DEBUG_SG=y
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
#
# Tracers
#
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_TRACE_BRANCH_PROFILING is not set
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_PAGE_POISONING is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_EARLY_PRINTK=y
CONFIG_HEART_BEAT=y
@ -777,6 +789,7 @@ CONFIG_CRYPTO=y
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_ZLIB is not set
# CONFIG_CRYPTO_LZO is not set
#
@ -784,6 +797,7 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_HW=y
# CONFIG_BINARY_PRINTF is not set
#
# Library routines
@ -797,8 +811,8 @@ CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_PLIST=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y

View file

@ -137,8 +137,8 @@ void __init init_IRQ(void)
intr_type =
*(int *) of_get_property(intc, "xlnx,kind-of-intr", NULL);
if (intr_type >= (1 << nr_irq))
printk(KERN_INFO " ERROR: Mishmash in king-of-intr param\n");
if (intr_type >= (1 << (nr_irq + 1)))
printk(KERN_INFO " ERROR: Mismatch in kind-of-intr param\n");
#ifdef CONFIG_SELFMOD_INTC
selfmod_function((int *) arr_func, intc_baseaddr);

View file

@ -473,12 +473,12 @@ endif
# Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
#
ifdef CONFIG_SGI_IP28
ifeq ($(call cc-option-yn,-mr10k-cache-barrier=1), n)
$(error gcc doesn't support needed option -mr10k-cache-barrier=1)
ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
$(error gcc doesn't support needed option -mr10k-cache-barrier=store)
endif
endif
core-$(CONFIG_SGI_IP28) += arch/mips/sgi-ip22/
cflags-$(CONFIG_SGI_IP28) += -mr10k-cache-barrier=1 -I$(srctree)/arch/mips/include/asm/mach-ip28
cflags-$(CONFIG_SGI_IP28) += -mr10k-cache-barrier=store -I$(srctree)/arch/mips/include/asm/mach-ip28
load-$(CONFIG_SGI_IP28) += 0xa800000020004000
#

View file

@ -956,7 +956,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
void __user * __cl_addr = (addr); \
unsigned long __cl_size = (n); \
if (__cl_size && access_ok(VERIFY_WRITE, \
((unsigned long)(__cl_addr)), __cl_size)) \
__cl_addr, __cl_size)) \
__cl_size = __clear_user(__cl_addr, __cl_size); \
__cl_size; \
})

View file

@ -82,8 +82,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
unsigned long flags;
int size;
unsigned long size, flags;
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
@ -121,8 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long flags;
int size;
unsigned long size, flags;
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", start, end);

View file

@ -117,8 +117,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
int cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
unsigned long flags;
int size;
unsigned long size, flags;
ENTER_CRITICAL(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
@ -160,8 +159,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long flags;
int size;
unsigned long size, flags;
ENTER_CRITICAL(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

View file

@ -111,8 +111,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
/* Usable for KV1 addresses only! */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long flags;
int size;
unsigned long size, flags;
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;

View file

@ -148,7 +148,7 @@ static irqreturn_t panel_int(int irq, void *dev_id)
if (sgint->istat1 & SGINT_ISTAT1_PWR) {
/* Wait until interrupt goes away */
disable_irq(SGI_PANEL_IRQ);
disable_irq_nosync(SGI_PANEL_IRQ);
init_timer(&debounce_timer);
debounce_timer.function = debounce;
debounce_timer.expires = jiffies + 5;

View file

@ -53,7 +53,7 @@ static inline void ip32_machine_halt(void)
static void ip32_machine_power_off(void)
{
volatile unsigned char reg_a, xctrl_a, xctrl_b;
unsigned char reg_a, xctrl_a, xctrl_b;
disable_irq(MACEISA_RTC_IRQ);
reg_a = CMOS_READ(RTC_REG_A);
@ -91,9 +91,10 @@ static void blink_timeout(unsigned long data)
static void debounce(unsigned long data)
{
volatile unsigned char reg_a, reg_c, xctrl_a;
unsigned char reg_a, reg_c, xctrl_a;
reg_c = CMOS_READ(RTC_INTR_FLAGS);
reg_a = CMOS_READ(RTC_REG_A);
CMOS_WRITE(reg_a | DS_REGA_DV0, RTC_REG_A);
wbflush();
xctrl_a = CMOS_READ(DS_B1_XCTRL4A);
@ -137,7 +138,7 @@ static inline void ip32_power_button(void)
static irqreturn_t ip32_rtc_int(int irq, void *dev_id)
{
volatile unsigned char reg_c;
unsigned char reg_c;
reg_c = CMOS_READ(RTC_INTR_FLAGS);
if (!(reg_c & RTC_IRQF)) {
@ -145,7 +146,7 @@ static irqreturn_t ip32_rtc_int(int irq, void *dev_id)
"%s: RTC IRQ without RTC_IRQF\n", __func__);
}
/* Wait until interrupt goes away */
disable_irq(MACEISA_RTC_IRQ);
disable_irq_nosync(MACEISA_RTC_IRQ);
init_timer(&debounce_timer);
debounce_timer.function = debounce;
debounce_timer.expires = jiffies + 50;

View file

@ -872,6 +872,18 @@ config TASK_SIZE
default "0x80000000" if PPC_PREP || PPC_8xx
default "0xc0000000"
config CONSISTENT_SIZE_BOOL
bool "Set custom consistent memory pool size"
depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
help
This option allows you to set the size of the
consistent memory pool. This pool of virtual memory
is used to make consistent memory allocations.
config CONSISTENT_SIZE
hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
default "0x00200000" if NOT_COHERENT_CACHE
config PIN_TLB
bool "Pinned Kernel TLBs (860 ONLY)"
depends on ADVANCED_OPTIONS && 8xx

View file

@ -1,13 +1,14 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.29-rc8
# Fri Mar 13 09:28:45 2009
# Linux kernel version: 2.6.30-rc5
# Fri May 15 10:37:00 2009
#
CONFIG_PPC64=y
#
# Processor support
#
CONFIG_PPC_BOOK3S=y
# CONFIG_POWER4_ONLY is not set
CONFIG_POWER3=y
CONFIG_POWER4=y
@ -55,9 +56,11 @@ CONFIG_OF=y
# CONFIG_GENERIC_TBSYNC is not set
CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DTC=y
# CONFIG_DEFAULT_UIMAGE is not set
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@ -72,6 +75,7 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
# CONFIG_AUDIT is not set
@ -88,8 +92,7 @@ CONFIG_CLASSIC_RCU=y
CONFIG_LOG_BUF_SHIFT=17
# CONFIG_GROUP_SCHED is not set
# CONFIG_CGROUPS is not set
CONFIG_SYSFS_DEPRECATED=y
CONFIG_SYSFS_DEPRECATED_V2=y
# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
@ -99,6 +102,9 @@ CONFIG_NAMESPACES=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@ -107,6 +113,7 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
CONFIG_KALLSYMS_EXTRA_PASS=y
# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@ -138,6 +145,7 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_HAVE_DMA_ATTRS=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@ -150,7 +158,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
# CONFIG_BLK_DEV_IO_TRACE is not set
CONFIG_BLK_DEV_BSG=y
# CONFIG_BLK_DEV_INTEGRITY is not set
CONFIG_BLOCK_COMPAT=y
@ -172,7 +179,6 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# Platform support
#
CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_ISERIES is not set
# CONFIG_PPC_PMAC is not set
@ -209,6 +215,7 @@ CONFIG_SPU_FS_64K_LS=y
# CONFIG_SPU_TRACE is not set
CONFIG_SPU_BASE=y
# CONFIG_PQ2ADS is not set
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
# CONFIG_IPIC is not set
# CONFIG_MPIC is not set
# CONFIG_MPIC_WEIRD is not set
@ -279,11 +286,14 @@ CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
CONFIG_ARCH_MEMORY_PROBE=y
CONFIG_PPC_HAS_HASH_64K=y
CONFIG_PPC_4K_PAGES=y
# CONFIG_PPC_16K_PAGES is not set
# CONFIG_PPC_64K_PAGES is not set
# CONFIG_PPC_256K_PAGES is not set
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y
@ -316,7 +326,6 @@ CONFIG_NET=y
#
# Networking options
#
CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@ -389,6 +398,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@ -396,6 +406,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NET_DROP_MONITOR is not set
# CONFIG_HAMRADIO is not set
# CONFIG_CAN is not set
# CONFIG_IRDA is not set
@ -419,11 +430,9 @@ CONFIG_BT_HCIBTUSB=m
# CONFIG_BT_HCIBFUSB is not set
# CONFIG_BT_HCIVHCI is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_PHONET is not set
CONFIG_WIRELESS=y
CONFIG_CFG80211=m
# CONFIG_CFG80211_REG_DEBUG is not set
CONFIG_NL80211=y
# CONFIG_WIRELESS_OLD_REGULATORY is not set
CONFIG_WIRELESS_EXT=y
# CONFIG_WIRELESS_EXT_SYSFS is not set
@ -602,6 +611,7 @@ CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SCSI_SRP_ATTRS is not set
# CONFIG_SCSI_LOWLEVEL is not set
# CONFIG_SCSI_DH is not set
# CONFIG_SCSI_OSD_INITIATOR is not set
# CONFIG_ATA is not set
CONFIG_MD=y
# CONFIG_BLK_DEV_MD is not set
@ -616,6 +626,7 @@ CONFIG_BLK_DEV_DM=m
# CONFIG_DM_UEVENT is not set
# CONFIG_MACINTOSH_DRIVERS is not set
CONFIG_NETDEVICES=y
CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@ -625,6 +636,8 @@ CONFIG_NETDEVICES=y
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
# CONFIG_ETHOC is not set
# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
@ -646,12 +659,13 @@ CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE=y
CONFIG_WLAN_80211=y
# CONFIG_LIBERTAS is not set
# CONFIG_LIBERTAS_THINFIRM is not set
# CONFIG_AT76C50X_USB is not set
# CONFIG_USB_ZD1201 is not set
# CONFIG_USB_NET_RNDIS_WLAN is not set
# CONFIG_RTL8187 is not set
# CONFIG_MAC80211_HWSIM is not set
# CONFIG_P54_COMMON is not set
# CONFIG_IWLWIFI_LEDS is not set
# CONFIG_AR9170_USB is not set
# CONFIG_HOSTAP is not set
# CONFIG_B43 is not set
# CONFIG_B43LEGACY is not set
@ -673,6 +687,7 @@ CONFIG_USB_PEGASUS=m
CONFIG_USB_USBNET=m
CONFIG_USB_NET_AX8817X=m
# CONFIG_USB_NET_CDCETHER is not set
# CONFIG_USB_NET_CDC_EEM is not set
# CONFIG_USB_NET_DM9601 is not set
# CONFIG_USB_NET_SMSC95XX is not set
# CONFIG_USB_NET_GL620A is not set
@ -724,28 +739,7 @@ CONFIG_INPUT_EVDEV=m
#
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
# CONFIG_JOYSTICK_ANALOG is not set
# CONFIG_JOYSTICK_A3D is not set
# CONFIG_JOYSTICK_ADI is not set
# CONFIG_JOYSTICK_COBRA is not set
# CONFIG_JOYSTICK_GF2K is not set
# CONFIG_JOYSTICK_GRIP is not set
# CONFIG_JOYSTICK_GRIP_MP is not set
# CONFIG_JOYSTICK_GUILLEMOT is not set
# CONFIG_JOYSTICK_INTERACT is not set
# CONFIG_JOYSTICK_SIDEWINDER is not set
# CONFIG_JOYSTICK_TMDC is not set
# CONFIG_JOYSTICK_IFORCE is not set
# CONFIG_JOYSTICK_WARRIOR is not set
# CONFIG_JOYSTICK_MAGELLAN is not set
# CONFIG_JOYSTICK_SPACEORB is not set
# CONFIG_JOYSTICK_SPACEBALL is not set
# CONFIG_JOYSTICK_STINGER is not set
# CONFIG_JOYSTICK_TWIDJOY is not set
# CONFIG_JOYSTICK_ZHENHUA is not set
# CONFIG_JOYSTICK_JOYDUMP is not set
# CONFIG_JOYSTICK_XPAD is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
@ -864,6 +858,7 @@ CONFIG_FB_PS3_DEFAULT_SIZE_M=9
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_METRONOME is not set
# CONFIG_FB_MB862XX is not set
# CONFIG_FB_BROADSHEET is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
@ -934,15 +929,17 @@ CONFIG_USB_HIDDEV=y
#
# Special HID drivers
#
# CONFIG_HID_COMPAT is not set
# CONFIG_HID_A4TECH is not set
# CONFIG_HID_APPLE is not set
# CONFIG_HID_BELKIN is not set
# CONFIG_HID_CHERRY is not set
# CONFIG_HID_CHICONY is not set
# CONFIG_HID_CYPRESS is not set
# CONFIG_DRAGONRISE_FF is not set
# CONFIG_HID_EZKEY is not set
# CONFIG_HID_KYE is not set
# CONFIG_HID_GYRATION is not set
# CONFIG_HID_KENSINGTON is not set
# CONFIG_HID_LOGITECH is not set
# CONFIG_HID_MICROSOFT is not set
# CONFIG_HID_MONTEREY is not set
@ -950,7 +947,7 @@ CONFIG_USB_HIDDEV=y
# CONFIG_HID_PANTHERLORD is not set
# CONFIG_HID_PETALYNX is not set
# CONFIG_HID_SAMSUNG is not set
# CONFIG_HID_SONY is not set
CONFIG_HID_SONY=m
# CONFIG_HID_SUNPLUS is not set
# CONFIG_GREENASIA_FF is not set
# CONFIG_HID_TOPSEED is not set
@ -1012,11 +1009,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_TMC is not set
#
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
#
# see USB_STORAGE Help for more information
# also be needed; see USB_STORAGE Help for more info
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
@ -1058,7 +1055,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_PHIDGET is not set
# CONFIG_USB_IDMOUSE is not set
# CONFIG_USB_FTDI_ELAN is not set
# CONFIG_USB_APPLEDISPLAY is not set
@ -1074,6 +1070,7 @@ CONFIG_USB_STORAGE=m
#
# OTG and related infrastructure
#
# CONFIG_NOP_USB_XCEIV is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@ -1113,8 +1110,10 @@ CONFIG_RTC_INTF_DEV=y
#
# on-CPU RTC drivers
#
CONFIG_RTC_DRV_PPC=m
# CONFIG_RTC_DRV_GENERIC is not set
CONFIG_RTC_DRV_PS3=m
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
@ -1125,6 +1124,7 @@ CONFIG_EXT2_FS=m
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=m
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
# CONFIG_EXT3_FS_POSIX_ACL is not set
# CONFIG_EXT3_FS_SECURITY is not set
@ -1160,6 +1160,11 @@ CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m
# CONFIG_FUSE_FS is not set
#
# Caches
#
# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
#
@ -1211,6 +1216,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
# CONFIG_NILFS2_FS is not set
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
@ -1223,7 +1229,6 @@ CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
# CONFIG_SUNRPC_REGISTER_V4 is not set
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
@ -1283,6 +1288,7 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set
CONFIG_BINARY_PRINTF=y
#
# Library routines
@ -1296,15 +1302,16 @@ CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_LZO_COMPRESS=m
CONFIG_LZO_DECOMPRESS=m
CONFIG_PLIST=y
CONFIG_DECOMPRESS_GZIP=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
CONFIG_HAVE_LMB=y
CONFIG_NLATTR=y
#
# Kernel hacking
@ -1322,6 +1329,9 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
CONFIG_SCHED_DEBUG=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
@ -1357,12 +1367,15 @@ CONFIG_DEBUG_LIST=y
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_RING_BUFFER=y
CONFIG_TRACING=y
CONFIG_TRACING_SUPPORT=y
#
# Tracers
@ -1371,18 +1384,21 @@ CONFIG_TRACING=y
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set
# CONFIG_EVENT_TRACER is not set
# CONFIG_BOOT_TRACER is not set
# CONFIG_TRACE_BRANCH_PROFILING is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_FTRACE_STARTUP_TEST is not set
# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
CONFIG_PRINT_STACK_DEPTH=64
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_PAGEALLOC is not set
# CONFIG_CODE_PATCHING_SELFTEST is not set
# CONFIG_FTR_FIXUP_SELFTEST is not set
# CONFIG_MSI_BITMAP_SELFTEST is not set
@ -1415,10 +1431,12 @@ CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_RNG=m
CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_MANAGER2=y
CONFIG_CRYPTO_GF128MUL=m
# CONFIG_CRYPTO_NULL is not set
CONFIG_CRYPTO_WORKQUEUE=y
# CONFIG_CRYPTO_CRYPTD is not set
# CONFIG_CRYPTO_AUTHENC is not set
# CONFIG_CRYPTO_TEST is not set
@ -1487,6 +1505,7 @@ CONFIG_CRYPTO_SALSA20=m
# Compression
#
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_ZLIB is not set
CONFIG_CRYPTO_LZO=m
#

View file

@ -26,7 +26,9 @@
* allocate the space "normally" and use the cache management functions
* to ensure it is consistent.
*/
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
struct device;
extern void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
@ -37,7 +39,7 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
* Cache coherent cores.
*/
#define __dma_alloc_coherent(gfp, size, handle) NULL
#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) ((void)0)
#define __dma_sync(addr, size, rw) ((void)0)
#define __dma_sync_page(pg, off, sz, rw) ((void)0)

View file

@ -14,8 +14,6 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
extern unsigned long FIXADDR_TOP;
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/page.h>
@ -24,6 +22,8 @@ extern unsigned long FIXADDR_TOP;
#include <asm/kmap_types.h>
#endif
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at

View file

@ -10,7 +10,7 @@
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot, ioremap_base;
extern unsigned long ioremap_bot;
#ifdef CONFIG_44x
extern int icache_44x_need_flush;
@ -55,9 +55,31 @@ extern int icache_44x_need_flush;
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
* This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
* value (for now) on others, from where we can start layout kernel
* virtual space that goes below PKMAP and FIXMAP
*/
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP PKMAP_BASE
#else
#define KVIRT_TOP (0xfe000000UL) /* for now, could be FIXMAP_BASE ? */
#endif
/*
* ioremap_bot starts at that address. Early ioremaps move down from there,
* until mem_init() at which point this becomes the top of the vmalloc
* and ioremap space
*/
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP KVIRT_TOP
#endif
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 64MB value just means that there will be a 64MB "hole" after the
* current 16MB value just means that there will be a 64MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
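
A quick worked example of the constants introduced above, assuming no HIGHMEM, a non-coherent cache, 4 KiB pages, and the 0x00200000 CONSISTENT_SIZE default from the new Kconfig entry:

#include <stdio.h>

int main(void)
{
        unsigned long kvirt_top = 0xfe000000UL;                 /* KVIRT_TOP, !HIGHMEM */
        unsigned long consistent_size = 0x00200000UL;           /* CONFIG_CONSISTENT_SIZE */
        unsigned long page_mask = ~0xfffUL;                     /* 4 KiB pages */

        /* ioremap_bot starts here; consistent allocations live above it. */
        printf("IOREMAP_TOP = 0x%08lx\n",
               (kvirt_top - consistent_size) & page_mask);      /* 0xfde00000 */
        return 0;
}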

View file

@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
{
void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
ret = __dma_alloc_coherent(size, dma_handle, flag);
ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
if (ret == NULL)
return NULL;
*dma_handle += get_dma_direct_offset(dev);

View file

@ -157,7 +157,7 @@ __ftrace_make_nop(struct module *mod,
* 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12)
*/
pr_debug("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@ -165,7 +165,7 @@ __ftrace_make_nop(struct module *mod,
return -EFAULT;
}
pr_debug(" %08x %08x", jmp[0], jmp[1]);
pr_devel(" %08x %08x", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
@ -181,18 +181,18 @@ __ftrace_make_nop(struct module *mod,
offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
(int)((short)jmp[1]);
pr_debug(" %x ", offset);
pr_devel(" %x ", offset);
/* get the address this jumps too */
tramp = mod->arch.toc + offset + 32;
pr_debug("toc: %lx", tramp);
pr_devel("toc: %lx", tramp);
if (probe_kernel_read(jmp, (void *)tramp, 8)) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
pr_debug(" %08x %08x\n", jmp[0], jmp[1]);
pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
@ -269,7 +269,7 @@ __ftrace_make_nop(struct module *mod,
* 0x4e, 0x80, 0x04, 0x20 bctr
*/
pr_debug("ip:%lx jumps to %lx", ip, tramp);
pr_devel("ip:%lx jumps to %lx", ip, tramp);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@ -277,7 +277,7 @@ __ftrace_make_nop(struct module *mod,
return -EFAULT;
}
pr_debug(" %08x %08x ", jmp[0], jmp[1]);
pr_devel(" %08x %08x ", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
@ -293,7 +293,7 @@ __ftrace_make_nop(struct module *mod,
if (tramp & 0x8000)
tramp -= 0x10000;
pr_debug(" %lx ", tramp);
pr_devel(" %lx ", tramp);
if (tramp != addr) {
printk(KERN_ERR
@ -402,7 +402,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/* ld r2,40(r1) */
op[1] = 0xe8410028;
pr_debug("write to %lx\n", rec->ip);
pr_devel("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
return -EPERM;
@ -442,7 +442,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EINVAL;
}
pr_debug("write to %lx\n", rec->ip);
pr_devel("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
return -EPERM;
@ -594,7 +594,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
PPC_LONG "2b,4b\n"
".previous"
: [old] "=r" (old), [faulted] "=r" (faulted)
: [old] "=&r" (old), [faulted] "=r" (faulted)
: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);

View file

@ -264,6 +264,7 @@ SECTIONS
*(.data.page_aligned)
}
. = ALIGN(L1_CACHE_BYTES);
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
*(.data.cacheline_aligned)
}

View file

@ -18,7 +18,6 @@ obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o
obj-$(CONFIG_XMON) += sstep.o
obj-$(CONFIG_KPROBES) += sstep.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
ifeq ($(CONFIG_PPC64),y)
obj-$(CONFIG_SMP) += locks.o

View file

@ -1,237 +0,0 @@
/*
* PowerPC version derived from arch/arm/mm/consistent.c
* Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
*
* Copyright (C) 2000 Russell King
*
* Consistent memory allocators. Used for DMA devices that want to
* share uncached memory with the processor core. The function return
* is the virtual address and 'dma_handle' is the physical address.
* Mostly stolen from the ARM port, with some changes for PowerPC.
* -- Dan
*
* Reorganized to get rid of the arch-specific consistent_* functions
* and provide non-coherent implementations for the DMA API. -Matt
*
* Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
* implementation. This is pulled straight from ARM and barely
* modified. -Matt
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
/*
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
unsigned long order;
int i;
unsigned int nr_pages = PAGE_ALIGN(size)>>PAGE_SHIFT;
unsigned int array_size = nr_pages * sizeof(struct page *);
struct page **pages;
struct page *end;
u64 mask = 0x00ffffff, limit; /* ISA default */
struct vm_struct *area;
BUG_ON(!mem_init_done);
size = PAGE_ALIGN(size);
limit = (mask + 1) & ~mask;
if (limit && size >= limit) {
printk(KERN_WARNING "coherent allocation too big (requested "
"%#x mask %#Lx)\n", size, mask);
return NULL;
}
order = get_order(size);
if (mask != 0xffffffff)
gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
goto no_page;
end = page + (1 << order);
/*
* Invalidate any data that might be lurking in the
* kernel direct-mapped region for device DMA.
*/
{
unsigned long kaddr = (unsigned long)page_address(page);
memset(page_address(page), 0, size);
flush_dcache_range(kaddr, kaddr + size);
}
split_page(page, order);
/*
* Set the "dma handle"
*/
*handle = page_to_phys(page);
area = get_vm_area_caller(size, VM_IOREMAP,
__builtin_return_address(1));
if (!area)
goto out_free_pages;
if (array_size > PAGE_SIZE) {
pages = vmalloc(array_size);
area->flags |= VM_VPAGES;
} else {
pages = kmalloc(array_size, GFP_KERNEL);
}
if (!pages)
goto out_free_area;
area->pages = pages;
area->nr_pages = nr_pages;
for (i = 0; i < nr_pages; i++)
pages[i] = page + i;
if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
goto out_unmap;
/*
* Free the otherwise unused pages.
*/
page += nr_pages;
while (page < end) {
__free_page(page);
page++;
}
return area->addr;
out_unmap:
vunmap(area->addr);
if (array_size > PAGE_SIZE)
vfree(pages);
else
kfree(pages);
goto out_free_pages;
out_free_area:
free_vm_area(area);
out_free_pages:
if (page)
__free_pages(page, order);
no_page:
return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
*/
void __dma_free_coherent(size_t size, void *vaddr)
{
vfree(vaddr);
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
* make an area consistent.
*/
void __dma_sync(void *vaddr, size_t size, int direction)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
switch (direction) {
case DMA_NONE:
BUG();
case DMA_FROM_DEVICE:
/*
* invalidate only when cache-line aligned otherwise there is
* the potential for discarding uncommitted data from the cache
*/
if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
flush_dcache_range(start, end);
else
invalidate_dcache_range(start, end);
break;
case DMA_TO_DEVICE: /* writeback only */
clean_dcache_range(start, end);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
flush_dcache_range(start, end);
break;
}
}
EXPORT_SYMBOL(__dma_sync);
#ifdef CONFIG_HIGHMEM
/*
* __dma_sync_page() implementation for systems using highmem.
* In this case, each page of a buffer must be kmapped/kunmapped
* in order to have a virtual address for __dma_sync(). This must
* not sleep so kmap_atomic()/kunmap_atomic() are used.
*
* Note: yes, it is possible and correct to have a buffer extend
* beyond the first page.
*/
static inline void __dma_sync_page_highmem(struct page *page,
unsigned long offset, size_t size, int direction)
{
size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
size_t cur_size = seg_size;
unsigned long flags, start, seg_offset = offset;
int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
int seg_nr = 0;
local_irq_save(flags);
do {
start = (unsigned long)kmap_atomic(page + seg_nr,
KM_PPC_SYNC_PAGE) + seg_offset;
/* Sync this buffer segment */
__dma_sync((void *)start, seg_size, direction);
kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
seg_nr++;
/* Calculate next buffer segment size */
seg_size = min((size_t)PAGE_SIZE, size - cur_size);
/* Add the segment size to our running total */
cur_size += seg_size;
seg_offset = 0;
} while (seg_nr < nr_segs);
local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
* __dma_sync_page makes memory consistent. Identical to __dma_sync, but
* takes a struct page instead of a virtual address
*/
void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
__dma_sync_page_highmem(page, offset, size, direction);
#else
unsigned long start = (unsigned long)page_address(page) + offset;
__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);

View file

@ -26,3 +26,4 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage-prot.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o

View file

@ -0,0 +1,400 @@
/*
* PowerPC version derived from arch/arm/mm/consistent.c
* Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
*
* Copyright (C) 2000 Russell King
*
* Consistent memory allocators. Used for DMA devices that want to
* share uncached memory with the processor core. The function return
* is the virtual address and 'dma_handle' is the physical address.
* Mostly stolen from the ARM port, with some changes for PowerPC.
* -- Dan
*
* Reorganized to get rid of the arch-specific consistent_* functions
* and provide non-coherent implementations for the DMA API. -Matt
*
* Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
* implementation. This is pulled straight from ARM and barely
* modified. -Matt
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/tlbflush.h>
#include "mmu_decl.h"
/*
* This address range defaults to a value that is safe for all
* platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
* can be further configured for specific applications under
* the "Advanced Setup" menu. -Matt
*/
#define CONSISTENT_BASE (IOREMAP_TOP)
#define CONSISTENT_END (CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
/*
* This is the page table (2MB) covering uncached, DMA consistent allocations
*/
static DEFINE_SPINLOCK(consistent_lock);
/*
* VM region handling support.
*
* This should become something generic, handling VM region allocations for
* vmalloc and similar (ioremap, module space, etc).
*
* I envisage vmalloc()'s supporting vm_struct becoming:
*
* struct vm_struct {
* struct vm_region region;
* unsigned long flags;
* struct page **pages;
* unsigned int nr_pages;
* unsigned long phys_addr;
* };
*
* get_vm_area() would then call vm_region_alloc with an appropriate
* struct vm_region head (eg):
*
* struct vm_region vmalloc_head = {
* .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
* .vm_start = VMALLOC_START,
* .vm_end = VMALLOC_END,
* };
*
* However, vmalloc_head.vm_start is variable (typically, it is dependent on
* the amount of RAM found at boot time.) I would imagine that get_vm_area()
* would have to initialise this each time prior to calling vm_region_alloc().
*/
struct ppc_vm_region {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
};
static struct ppc_vm_region consistent_head = {
.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
.vm_start = CONSISTENT_BASE,
.vm_end = CONSISTENT_END,
};
static struct ppc_vm_region *
ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
unsigned long addr = head->vm_start, end = head->vm_end - size;
unsigned long flags;
struct ppc_vm_region *c, *new;
new = kmalloc(sizeof(struct ppc_vm_region), gfp);
if (!new)
goto out;
spin_lock_irqsave(&consistent_lock, flags);
list_for_each_entry(c, &head->vm_list, vm_list) {
if ((addr + size) < addr)
goto nospc;
if ((addr + size) <= c->vm_start)
goto found;
addr = c->vm_end;
if (addr > end)
goto nospc;
}
found:
/*
* Insert this entry _before_ the one we found.
*/
list_add_tail(&new->vm_list, &c->vm_list);
new->vm_start = addr;
new->vm_end = addr + size;
spin_unlock_irqrestore(&consistent_lock, flags);
return new;
nospc:
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(new);
out:
return NULL;
}
static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
struct ppc_vm_region *c;
list_for_each_entry(c, &head->vm_list, vm_list) {
if (c->vm_start == addr)
goto out;
}
c = NULL;
out:
return c;
}
/*
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
void *
__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
struct ppc_vm_region *c;
unsigned long order;
u64 mask = ISA_DMA_THRESHOLD, limit;
if (dev) {
mask = dev->coherent_dma_mask;
/*
* Sanity check the DMA mask - it must be non-zero, and
* must be able to be satisfied by a DMA allocation.
*/
if (mask == 0) {
dev_warn(dev, "coherent DMA mask is unset\n");
goto no_page;
}
if ((~mask) & ISA_DMA_THRESHOLD) {
dev_warn(dev, "coherent DMA mask %#llx is smaller "
"than system GFP_DMA mask %#llx\n",
mask, (unsigned long long)ISA_DMA_THRESHOLD);
goto no_page;
}
}
size = PAGE_ALIGN(size);
limit = (mask + 1) & ~mask;
if ((limit && size >= limit) ||
size >= (CONSISTENT_END - CONSISTENT_BASE)) {
printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
size, mask);
return NULL;
}
order = get_order(size);
/* Might be useful if we ever have a real legacy DMA zone... */
if (mask != 0xffffffff)
gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
if (!page)
goto no_page;
/*
* Invalidate any data that might be lurking in the
* kernel direct-mapped region for device DMA.
*/
{
unsigned long kaddr = (unsigned long)page_address(page);
memset(page_address(page), 0, size);
flush_dcache_range(kaddr, kaddr + size);
}
/*
* Allocate a virtual address in the consistent mapping region.
*/
c = ppc_vm_region_alloc(&consistent_head, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (c) {
unsigned long vaddr = c->vm_start;
struct page *end = page + (1 << order);
split_page(page, order);
/*
* Set the "dma handle"
*/
*handle = page_to_phys(page);
do {
SetPageReserved(page);
map_page(vaddr, page_to_phys(page),
pgprot_noncached(PAGE_KERNEL));
page++;
vaddr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
/*
* Free the otherwise unused pages.
*/
while (page < end) {
__free_page(page);
page++;
}
return (void *)c->vm_start;
}
if (page)
__free_pages(page, order);
no_page:
return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
*/
void __dma_free_coherent(size_t size, void *vaddr)
{
struct ppc_vm_region *c;
unsigned long flags, addr;
size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags);
c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
if (!c)
goto no_area;
if ((c->vm_end - c->vm_start) != size) {
printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
dump_stack();
size = c->vm_end - c->vm_start;
}
addr = c->vm_start;
do {
pte_t *ptep;
unsigned long pfn;
ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
addr),
addr),
addr);
if (!pte_none(*ptep) && pte_present(*ptep)) {
pfn = pte_pfn(*ptep);
pte_clear(&init_mm, addr, ptep);
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
__free_page(page);
}
}
addr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
flush_tlb_kernel_range(c->vm_start, c->vm_end);
list_del(&c->vm_list);
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(c);
return;
no_area:
spin_unlock_irqrestore(&consistent_lock, flags);
printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
__func__, vaddr);
dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);
/*
* make an area consistent.
*/
void __dma_sync(void *vaddr, size_t size, int direction)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
switch (direction) {
case DMA_NONE:
BUG();
case DMA_FROM_DEVICE:
/*
* invalidate only when cache-line aligned otherwise there is
* the potential for discarding uncommitted data from the cache
*/
if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
flush_dcache_range(start, end);
else
invalidate_dcache_range(start, end);
break;
case DMA_TO_DEVICE: /* writeback only */
clean_dcache_range(start, end);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
flush_dcache_range(start, end);
break;
}
}
EXPORT_SYMBOL(__dma_sync);
#ifdef CONFIG_HIGHMEM
/*
* __dma_sync_page() implementation for systems using highmem.
* In this case, each page of a buffer must be kmapped/kunmapped
* in order to have a virtual address for __dma_sync(). This must
* not sleep so kmap_atomic()/kunmap_atomic() are used.
*
* Note: yes, it is possible and correct to have a buffer extend
* beyond the first page.
*/
static inline void __dma_sync_page_highmem(struct page *page,
unsigned long offset, size_t size, int direction)
{
size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
size_t cur_size = seg_size;
unsigned long flags, start, seg_offset = offset;
int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
int seg_nr = 0;
local_irq_save(flags);
do {
start = (unsigned long)kmap_atomic(page + seg_nr,
KM_PPC_SYNC_PAGE) + seg_offset;
/* Sync this buffer segment */
__dma_sync((void *)start, seg_size, direction);
kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
seg_nr++;
/* Calculate next buffer segment size */
seg_size = min((size_t)PAGE_SIZE, size - cur_size);
/* Add the segment size to our running total */
cur_size += seg_size;
seg_offset = 0;
} while (seg_nr < nr_segs);
local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
/*
* __dma_sync_page makes memory consistent. Identical to __dma_sync, but
* takes a struct page instead of a virtual address
*/
void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
__dma_sync_page_highmem(page, offset, size, direction);
#else
unsigned long start = (unsigned long)page_address(page) + offset;
__dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
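
The allocator above is normally reached through the generic DMA API rather than by calling __dma_alloc_coherent() directly. A minimal sketch of that usage follows; the device pointer, buffer size and helper name are assumptions for illustration, not part of this commit.

#include <linux/dma-mapping.h>

/* Hypothetical helper: allocate and free one uncached DMA buffer. */
static int example_dma_buffer(struct device *dev)
{
	dma_addr_t bus;
	void *cpu;

	/* On CONFIG_NOT_COHERENT_CACHE platforms this ends up in
	 * __dma_alloc_coherent() and maps the pages non-cached in the
	 * consistent region set up above. */
	cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* ... program the device with 'bus', access the data via 'cpu' ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
	return 0;
}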

View file

@ -168,12 +168,8 @@ void __init MMU_init(void)
ppc_md.progress("MMU:mapin", 0x301);
mapin_ram();
#ifdef CONFIG_HIGHMEM
ioremap_base = PKMAP_BASE;
#else
ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
ioremap_bot = ioremap_base;
/* Initialize early top-down ioremap allocator */
ioremap_bot = IOREMAP_TOP;
/* Map in I/O resources */
if (ppc_md.progress)

View file

@ -380,6 +380,23 @@ void __init mem_init(void)
bsssize >> 10,
initsize >> 10);
#ifdef CONFIG_PPC32
pr_info("Kernel virtual memory layout:\n");
pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
ioremap_bot, IOREMAP_TOP);
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
mem_init_done = 1;
}

View file

@ -127,12 +127,12 @@ static unsigned int steal_context_up(unsigned int id)
pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
__clear_bit(id, stale_map[cpu]);

View file

@ -219,7 +219,8 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
entry = do_dcache_icache_coherency(entry);
changed = !pte_same(*(ptep), entry);
if (changed) {
assert_pte_locked(vma->vm_mm, address);
if (!(vma->vm_flags & VM_HUGETLB))
assert_pte_locked(vma->vm_mm, address);
__ptep_set_access_flags(ptep, entry);
flush_tlb_page_nohash(vma, address);
}

View file

@ -399,8 +399,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
#endif /* CONFIG_DEBUG_PAGEALLOC */
static int fixmaps;
unsigned long FIXADDR_TOP = (-PAGE_SIZE);
EXPORT_SYMBOL(FIXADDR_TOP);
void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{

View file

@ -592,3 +592,17 @@ int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
}
return irq;
}
static void __devinit quirk_ipr_msi(struct pci_dev *dev)
{
/* Something prevents MSIs from the IPR from working on Bimini,
* and the driver has no smarts to recover. So disable MSI
* on it for now. */
if (machine_is(maple)) {
dev->no_msi = 1;
dev_info(&dev->dev, "Quirk disabled MSI\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
quirk_ipr_msi);

View file

@ -263,6 +263,9 @@ static int camera_probe(void)
struct i2c_msg msg;
int ret;
if (!a)
return -ENODEV;
camera_power(1);
msg.addr = 0x6e;
msg.buf = camera_ncm03j_magic;

View file

@ -498,6 +498,19 @@ config PARAVIRT
over full virtualization. However, when run without a hypervisor
the kernel is theoretically slower and slightly larger.
config PARAVIRT_SPINLOCKS
bool "Paravirtualization layer for spinlocks"
depends on PARAVIRT && SMP && EXPERIMENTAL
---help---
Paravirtualized spinlocks allow a pvops backend to replace the
spinlock implementation with something virtualization-friendly
(for example, block the virtual CPU rather than spinning).
Unfortunately the downside is a performance hit of up to 5% on
native kernels, varying with the workload.
If you are unsure how to answer this question, answer N.
config PARAVIRT_CLOCK
bool
default n
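
A minimal sketch of selecting the new PARAVIRT_SPINLOCKS option above in a build configuration (illustrative; only CONFIG_PARAVIRT_SPINLOCKS is introduced by this commit, the other symbols it depends on already exist):

CONFIG_EXPERIMENTAL=y
CONFIG_SMP=y
CONFIG_PARAVIRT=y
CONFIG_PARAVIRT_SPINLOCKS=y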

View file

@ -504,8 +504,11 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
if (sym->st_shndx == SHN_ABS) {
continue;
}
if (r_type == R_386_PC32) {
/* PC relative relocations don't need to be adjusted */
if (r_type == R_386_NONE || r_type == R_386_PC32) {
/*
* NONE can be ignored and PC relative
* relocations don't need to be adjusted.
*/
}
else if (r_type == R_386_32) {
/* Visit relocations that need to be adjusted */

View file

@ -17,11 +17,6 @@
#define SMAP 0x534d4150 /* ASCII "SMAP" */
struct e820_ext_entry {
struct e820entry std;
u32 ext_flags;
} __attribute__((packed));
static int detect_memory_e820(void)
{
int count = 0;
@ -29,13 +24,21 @@ static int detect_memory_e820(void)
u32 size, id, edi;
u8 err;
struct e820entry *desc = boot_params.e820_map;
static struct e820_ext_entry buf; /* static so it is zeroed */
static struct e820entry buf; /* static so it is zeroed */
/*
* Set this here so that if the BIOS doesn't change this field
* but still doesn't change %ecx, we're still okay...
* Note: at least one BIOS is known which assumes that the
* buffer pointed to by one e820 call is the same one as
* the previous call, and only changes modified fields. Therefore,
* we use a temporary buffer and copy the results entry by entry.
*
* This routine deliberately does not try to account for
* ACPI 3+ extended attributes. This is because there are
* BIOSes in the field which report zero for the valid bit for
* all ranges, and we don't currently make any use of the
* other attribute bits. Revisit this if we see the extended
* attribute bits deployed in a meaningful way in the future.
*/
buf.ext_flags = 1;
do {
size = sizeof buf;
@ -66,13 +69,7 @@ static int detect_memory_e820(void)
break;
}
/* ACPI 3.0 added the extended flags support. If bit 0
in the extended flags is zero, we're supposed to simply
ignore the entry -- a backwards incompatible change! */
if (size > 20 && !(buf.ext_flags & 1))
continue;
*desc++ = buf.std;
*desc++ = buf;
count++;
} while (next && count < ARRAY_SIZE(boot_params.e820_map));
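
For reference, a sketch of the classic 20-byte entry layout that the loop above now copies verbatim into boot_params.e820_map (the struct name is hypothetical; the real definition lives in asm/e820.h):

struct e820entry_sketch {
	__u64 addr;	/* start of memory segment */
	__u64 size;	/* length of memory segment */
	__u32 type;	/* E820_RAM, E820_RESERVED, E820_ACPI, ... */
} __attribute__((packed));

/* ACPI 3.0 optionally appends a 4-byte ext_flags word; as the comment
 * above explains, this commit deliberately stops requesting it. */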

View file

@ -1443,7 +1443,7 @@ u64 _paravirt_ident_64(u64);
#define paravirt_nop ((void *)_paravirt_nop)
#ifdef CONFIG_SMP
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{

View file

@ -82,22 +82,22 @@ do { \
case 1: \
asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
: "qi" ((T__)(val))); \
break; \
case 2: \
asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
: "ri" ((T__)(val))); \
break; \
case 4: \
asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
: "ri" ((T__)(val))); \
break; \
case 8: \
asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
: "re" ((T__)val)); \
: "re" ((T__)(val))); \
break; \
default: __bad_percpu_size(); \
} \
@ -109,7 +109,7 @@ do { \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "=q" (ret__) \
: "m" (var)); \
break; \
case 2: \

View file

@ -187,14 +187,15 @@ static inline int v8086_mode(struct pt_regs *regs)
/*
* X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
* when it traps. So regs will be the current sp.
* when it traps. The previous stack will be directly underneath the saved
* registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
*
* This is valid only for kernel mode traps.
*/
static inline unsigned long kernel_trap_sp(struct pt_regs *regs)
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
return (unsigned long)regs;
return (unsigned long)(&regs->sp);
#else
return regs->sp;
#endif

View file

@ -172,7 +172,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
#ifndef CONFIG_PARAVIRT
#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
@ -206,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
__raw_spin_lock(lock);
}
#endif
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{

View file

@ -89,7 +89,8 @@ obj-$(CONFIG_DEBUG_NX_TEST) += test_nx.o
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
obj-$(CONFIG_KVM_GUEST) += kvm.o
obj-$(CONFIG_KVM_CLOCK) += kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch_$(BITS).o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o

View file

@ -254,7 +254,7 @@ static int parse_unisys_oem(char *oemptr)
}
#ifdef CONFIG_ACPI
static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
{
struct acpi_table_header *header = NULL;
struct es7000_oem_table *table;
@ -285,7 +285,7 @@ static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
return 0;
}
static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
{
if (!oem_addr)
return;
@ -306,7 +306,7 @@ static int es7000_check_dsdt(void)
static int es7000_acpi_ret;
/* Hook from generic ACPI tables.c */
static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
unsigned long oem_addr = 0;
int check_dsdt;
@ -717,7 +717,7 @@ struct apic apic_es7000_cluster = {
.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
};
struct apic apic_es7000 = {
struct apic __refdata apic_es7000 = {
.name = "es7000",
.probe = probe_es7000,

View file

@ -114,6 +114,13 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
static int __init x86_xsave_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
return 1;
}
__setup("noxsave", x86_xsave_setup);
#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

View file

@ -168,6 +168,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
case 0x0E: /* Core */
case 0x0F: /* Core Duo */
case 0x16: /* Celeron Core */
case 0x1C: /* Atom */
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
case 0x0D: /* Pentium M (Dothan) */

View file

@ -168,10 +168,12 @@ static int check_powernow(void)
return 1;
}
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
static void invalidate_entry(unsigned int entry)
{
powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
}
#endif
static int get_ranges(unsigned char *pst)
{

View file

@ -649,6 +649,20 @@ static void print_basics(struct powernow_k8_data *data)
data->batps);
}
static u32 freq_from_fid_did(u32 fid, u32 did)
{
u32 mhz = 0;
if (boot_cpu_data.x86 == 0x10)
mhz = (100 * (fid + 0x10)) >> did;
else if (boot_cpu_data.x86 == 0x11)
mhz = (100 * (fid + 8)) >> did;
else
BUG();
return mhz * 1000;
}
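
A quick worked example of the helper above, with fid/did values assumed purely for illustration:

/* Family 0x10, fid = 0x0a, did = 1:
 *   mhz = (100 * (0x0a + 0x10)) >> 1 = (100 * 26) / 2 = 1300
 *   freq_from_fid_did(0x0a, 1) == 1300000   (kHz, as cpufreq expects)
 */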
static int fill_powernow_table(struct powernow_k8_data *data,
struct pst_s *pst, u8 maxvid)
{
@ -923,8 +937,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
powernow_table[i].index = index;
powernow_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
/* Frequency may be rounded for these */
if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
powernow_table[i].frequency =
freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
} else
powernow_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
}
return 0;
}
@ -1215,13 +1234,16 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
return cpufreq_frequency_table_verify(pol, data->powernow_table);
}
static const char ACPI_PSS_BIOS_BUG_MSG[] =
KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
/* per CPU init entry point to the driver */
static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
cpumask_t oldmask;
int rc;
static int print_once;
if (!cpu_online(pol->cpu))
return -ENODEV;
@ -1244,19 +1266,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
* an UP version, and is deprecated by AMD.
*/
if (num_online_cpus() != 1) {
/*
* Replace this one with print_once as soon as such a
* thing gets introduced
*/
if (!print_once) {
WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS "
"does not provide ACPI _PSS objects "
"in a way that Linux understands. "
"Please report this to the Linux ACPI"
" maintainers and complain to your "
"BIOS vendor.\n");
print_once++;
}
printk_once(ACPI_PSS_BIOS_BUG_MSG);
goto err_out;
}
if (pol->cpu != 0) {

View file

@ -275,7 +275,11 @@ static void __init print_mtrr_state(void)
}
printk(KERN_DEBUG "MTRR variable ranges %sabled:\n",
mtrr_state.enabled & 2 ? "en" : "dis");
high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
if (size_or_mask & 0xffffffffUL)
high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
else
high_width = ffs(size_or_mask>>32) + 32 - 1;
high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
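
A worked example of the new width calculation; the size_or_mask value assumed here corresponds to 36 physical address bits with 4K pages:

/* size_or_mask = ~((1ULL << (36 - PAGE_SHIFT)) - 1) = 0xffffffffff000000
 * low 32 bits are non-zero -> high_width = ffs(0xff000000) - 1 = 24
 * final high_width = (24 - (32 - PAGE_SHIFT) + 3) / 4 = 1 hex digit,
 * i.e. one extra digit to print physical-address bits above bit 31.
 */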
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n",

View file

@ -442,7 +442,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 4b)
: [old] "=r" (old), [faulted] "=r" (faulted)
: [old] "=&r" (old), [faulted] "=r" (faulted)
: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);

View file

@ -134,7 +134,9 @@ static void *get_call_destination(u8 type)
.pv_irq_ops = pv_irq_ops,
.pv_apic_ops = pv_apic_ops,
.pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
.pv_lock_ops = pv_lock_ops,
#endif
};
return *((void **)&tmpl + type);
}

View file

@ -232,6 +232,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
},
},
{ /* Handle problems with rebooting on Sony VGN-Z540N */
.callback = set_bios_reboot,
.ident = "Sony VGN-Z540N",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
{ }
};

View file

@ -160,8 +160,10 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
/*
* If large page isn't supported, there's no benefit in doing
* this. Also, on non-NUMA, embedding is better.
*
* NOTE: disabled for now.
*/
if (!cpu_has_pse || !pcpu_need_numa())
if (true || !cpu_has_pse || !pcpu_need_numa())
return -EINVAL;
/*

View file

@ -2897,8 +2897,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->tlb_flush(vcpu);
set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
kvm_set_cr3(vcpu, vcpu->arch.cr3);
return 1;
}

View file

@ -338,6 +338,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = vcpu->arch.cr4;
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
if (cr4 & CR4_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
kvm_inject_gp(vcpu, 0);
@ -351,7 +354,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
kvm_inject_gp(vcpu, 0);
return;
}
} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.cr3)) {
printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
kvm_inject_gp(vcpu, 0);

View file

@ -153,7 +153,7 @@ static void __cpa_flush_all(void *arg)
*/
__flush_tlb_all();
if (cache && boot_cpu_data.x86_model >= 4)
if (cache && boot_cpu_data.x86 >= 4)
wbinvd();
}
@ -208,20 +208,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
int in_flags, struct page **pages)
{
unsigned int i, level;
unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
BUG_ON(irqs_disabled());
on_each_cpu(__cpa_flush_range, NULL, 1);
on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
if (!cache)
if (!cache || do_wbinvd)
return;
/* 4M threshold */
if (numpages >= 1024) {
if (boot_cpu_data.x86_model >= 4)
wbinvd();
return;
}
/*
* We only need to flush on one CPU,
* clflush is a MESI-coherent instruction that

View file

@ -76,9 +76,9 @@ void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
struct frame_head *head = (struct frame_head *)frame_pointer(regs);
unsigned long stack = kernel_trap_sp(regs);
if (!user_mode_vm(regs)) {
unsigned long stack = kernel_stack_pointer(regs);
if (depth)
dump_trace(NULL, regs, (unsigned long *)stack, 0,
&backtrace_ops, &depth);

View file

@ -9,5 +9,6 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
time.o xen-asm.o xen-asm_$(BITS).o \
grant-table.o suspend.o
obj-$(CONFIG_SMP) += smp.o spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o

View file

@ -42,6 +42,7 @@
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

View file

@ -62,15 +62,26 @@ void xen_setup_vcpu_info_placement(void);
#ifdef CONFIG_SMP
void xen_smp_init(void);
void __init xen_init_spinlocks(void);
__cpuinit void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);
extern cpumask_var_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
#endif
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init xen_init_spinlocks(void);
__cpuinit void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);
#else
static inline void xen_init_spinlocks(void)
{
}
static inline void xen_init_lock_cpu(int cpu)
{
}
static inline void xen_uninit_lock_cpu(int cpu)
{
}
#endif
/* Declare an asm function, along with symbols needed to make it
inlineable */

View file

@ -221,7 +221,8 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
request_module(name);
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
CRYPTO_ALG_NEED_FALLBACK) &&
snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
request_module(tmp);

View file

@ -153,7 +153,8 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
if (err)
goto out;
eseqiv_complete2(req);
if (giv != req->giv)
eseqiv_complete2(req);
out:
return err;

View file

@ -357,6 +357,7 @@ static void dpm_power_up(pm_message_t state)
{
struct device *dev;
mutex_lock(&dpm_list_mtx);
list_for_each_entry(dev, &dpm_list, power.entry)
if (dev->power.status > DPM_OFF) {
int error;
@ -366,6 +367,7 @@ static void dpm_power_up(pm_message_t state)
if (error)
pm_dev_err(dev, state, " early", error);
}
mutex_unlock(&dpm_list_mtx);
}
/**
@ -614,6 +616,7 @@ int device_power_down(pm_message_t state)
int error = 0;
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
error = suspend_device_noirq(dev, state);
if (error) {
@ -622,6 +625,7 @@ int device_power_down(pm_message_t state)
}
dev->power.status = DPM_OFF_IRQ;
}
mutex_unlock(&dpm_list_mtx);
if (error)
device_power_up(resume_event(state));
return error;

View file

@ -934,8 +934,6 @@ static void blkfront_closing(struct xenbus_device *dev)
spin_lock_irqsave(&blkif_io_lock, flags);
del_gendisk(info->gd);
/* No more blkif_request(). */
blk_stop_queue(info->rq);
@ -949,6 +947,8 @@ static void blkfront_closing(struct xenbus_device *dev)
blk_cleanup_queue(info->rq);
info->rq = NULL;
del_gendisk(info->gd);
out:
xenbus_frontend_closed(dev);
}
@ -977,8 +977,10 @@ static void backend_changed(struct xenbus_device *dev,
break;
case XenbusStateClosing:
if (info->gd == NULL)
xenbus_dev_fatal(dev, -ENODEV, "gd is NULL");
if (info->gd == NULL) {
xenbus_frontend_closed(dev);
break;
}
bd = bdget_disk(info->gd, 0);
if (bd == NULL)
xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

View file

@ -587,7 +587,7 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct device_node *node = vdev->dev.archdata.of_node;
deviceno = vdev->unit_address;
if (deviceno > VIOCD_MAX_CD)
if (deviceno >= VIOCD_MAX_CD)
return -ENODEV;
if (!node)
return -ENODEV;

Some files were not shown because too many files have changed in this diff