Merge branch 'linus' into perf/core

Merge reason: pick up tools/perf/ changes from upstream.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2009-10-15 08:44:42 +02:00
commit b226f744d4
130 changed files with 1291 additions and 790 deletions

View file

@ -64,14 +64,14 @@ be used to view the printk buffer of a remote machine, even with live update.
Bernhard Kaindl enhanced firescope to support accessing 64-bit machines
from 32-bit firescope and vice versa:
- ftp://ftp.suse.de/private/bk/firewire/tools/firescope-0.2.2.tar.bz2
- http://halobates.de/firewire/firescope-0.2.2.tar.bz2
and he implemented fast system dump (alpha version - read README.txt):
- ftp://ftp.suse.de/private/bk/firewire/tools/firedump-0.1.tar.bz2
- http://halobates.de/firewire/firedump-0.1.tar.bz2
There is also a gdb proxy for firewire which allows gdb to be used to access
data which can be referenced from symbols found by gdb in vmlinux:
- ftp://ftp.suse.de/private/bk/firewire/tools/fireproxy-0.33.tar.bz2
- http://halobates.de/firewire/fireproxy-0.33.tar.bz2
The latest version of this gdb proxy (fireproxy-0.34) can communicate (not
yet stable) with kgdb over a memory-based communication module (kgdbom).
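
A rough usage sketch, assuming fireproxy exposes gdb's standard remote
serial protocol on a TCP port (the port number below is a placeholder, not
taken from the fireproxy documentation):

  gdb vmlinux                          # load the target kernel's symbols
  (gdb) target remote localhost:4242   # attach to the running proxy
  (gdb) print jiffies                  # read target memory via those symbols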
@ -178,7 +178,7 @@ Step-by-step instructions for using firescope with early OHCI initialization:
Notes
-----
Documentation and specifications: ftp://ftp.suse.de/private/bk/firewire/docs
Documentation and specifications: http://halobates.de/firewire/
FireWire is a trademark of Apple Inc. - for more information please refer to:
http://en.wikipedia.org/wiki/FireWire

View file

@ -451,3 +451,33 @@ Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
will also allow making ALSA OSS emulation independent of
sound_core. The dependency will be broken then too.
Who: Tejun Heo <tj@kernel.org>
----------------------------
What: Support for VMware's guest paravirtualization technique [VMI] will be
dropped.
When: 2.6.37 or earlier.
Why: With the recent innovations in CPU hardware acceleration technologies
from Intel and AMD, VMware ran a few experiments to compare these
techniques to the guest paravirtualization technique on VMware's platform.
These hardware-assisted virtualization techniques have surpassed the
performance benefits provided by VMI in most of the workloads. VMware
expects that these hardware features will be ubiquitous in a couple of
years; as a result, VMware has started a phased retirement of this
feature from the hypervisor. We will be removing this feature from the
kernel too. Right now we are targeting 2.6.37, but it can be retired
earlier if technical reasons (read: the opportunity to remove a major
chunk of pvops) arise.
Please note that VMI has always been an optimization, and non-VMI kernels
still work fine on VMware's platform.
The latest versions of VMware's products which support VMI are
Workstation 7.0 and vSphere 4.0 on the ESX side; future maintenance
releases for these products will continue supporting VMI.
For more details about the VMI retirement, see:
http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
Who: Alok N Kataria <akataria@vmware.com>
----------------------------

View file

@ -123,10 +123,18 @@ resuid=n The user ID which may use the reserved blocks.
sb=n Use alternate superblock at this location.
quota
noquota
grpquota
usrquota
quota These options are ignored by the filesystem. They
noquota are used only by quota tools to recognize volumes
grpquota where quota should be turned on. See documentation
usrquota in the quota-tools package for more details
(http://sourceforge.net/projects/linuxquota).
jqfmt=<quota type> These options tell filesystem details about quota
usrjquota=<file> so that quota information can be properly updated
grpjquota=<file> during journal replay. They replace the above
quota options. See documentation in the quota-tools
package for more details
(http://sourceforge.net/projects/linuxquota).
bh (*) ext3 associates buffer heads to data pages to
nobh (a) cache disk block mapping information
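
As a minimal sketch of how the journaled quota options above fit together
at mount time (the device, mount point and quota file names here are
placeholders, not taken from this document):

  mount -t ext3 -o usrjquota=aquota.user,grpjquota=aquota.group,jqfmt=vfsv0 \
        /dev/sdb1 /mnt/data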

View file

@ -359,6 +359,7 @@ STAC9227/9228/9229/927x
5stack-no-fp D965 5stack without front panel
dell-3stack Dell Dimension E520
dell-bios Fixes with Dell BIOS setup
volknob Fixes with volume-knob widget 0x24
auto BIOS setup (default)
STAC92HD71B*

View file

@ -577,6 +577,11 @@ M: Mike Rapoport <mike@compulab.co.il>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
ARM/CONTEC MICRO9 MACHINE SUPPORT
M: Hubert Feurstein <hubert.feurstein@contec.at>
S: Maintained
F: arch/arm/mach-ep93xx/micro9.c
ARM/CORGI MACHINE SUPPORT
M: Richard Purdie <rpurdie@rpsys.net>
S: Maintained
@ -2610,6 +2615,7 @@ L: linux1394-devel@lists.sourceforge.net
W: http://www.linux1394.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
S: Maintained
F: Documentation/debugging-via-ohci1394.txt
F: drivers/ieee1394/
IEEE 1394 RAW I/O DRIVER
@ -4071,6 +4077,13 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
S: Supported
F: kernel/perf_event.c
F: include/linux/perf_event.h
F: arch/*/*/kernel/perf_event.c
F: arch/*/include/asm/perf_event.h
F: arch/*/lib/perf_event.c
F: arch/*/kernel/perf_callchain.c
F: tools/perf/
PERSONALITY HANDLING
M: Christoph Hellwig <hch@infradead.org>

View file

@ -179,46 +179,9 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
# Alternatively CROSS_COMPILE can be set in the environment.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
#
# To force ARCH and CROSS_COMPILE settings include kernel.* files
# in the kernel tree - do not patch this file.
export KBUILD_BUILDHOST := $(SUBARCH)
# Kbuild save the ARCH and CROSS_COMPILE setting in kernel.* files.
# Restore these settings and check that user did not specify
# conflicting values.
saved_arch := $(shell cat include/generated/kernel.arch 2> /dev/null)
saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null)
ifneq ($(CROSS_COMPILE),)
ifneq ($(saved_cross),)
ifneq ($(CROSS_COMPILE),$(saved_cross))
$(error CROSS_COMPILE changed from \
"$(saved_cross)" to \
to "$(CROSS_COMPILE)". \
Use "make mrproper" to fix it up)
endif
endif
else
CROSS_COMPILE := $(saved_cross)
endif
ifneq ($(ARCH),)
ifneq ($(saved_arch),)
ifneq ($(saved_arch),$(ARCH))
$(error ARCH changed from \
"$(saved_arch)" to "$(ARCH)". \
Use "make mrproper" to fix it up)
endif
endif
else
ifneq ($(saved_arch),)
ARCH := $(saved_arch)
else
ARCH := $(SUBARCH)
endif
endif
ARCH ?= $(SUBARCH)
CROSS_COMPILE ?=
# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
@ -483,11 +446,6 @@ ifeq ($(config-targets),1)
include $(srctree)/arch/$(SRCARCH)/Makefile
export KBUILD_DEFCONFIG KBUILD_KCONFIG
# save ARCH & CROSS_COMPILE settings
$(shell mkdir -p include/generated && \
echo $(ARCH) > include/generated/kernel.arch && \
echo $(CROSS_COMPILE) > include/generated/kernel.cross)
config: scripts_basic outputmakefile FORCE
$(Q)mkdir -p include/linux include/config
$(Q)$(MAKE) $(build)=scripts/kconfig $@

View file

@ -84,7 +84,7 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
*p = res | mask;
raw_local_irq_restore(flags);
return res & mask;
return (res & mask) != 0;
}
static inline int
@ -101,7 +101,7 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
*p = res & ~mask;
raw_local_irq_restore(flags);
return res & mask;
return (res & mask) != 0;
}
static inline int
@ -118,7 +118,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
*p = res ^ mask;
raw_local_irq_restore(flags);
return res & mask;
return (res & mask) != 0;
}
#include <asm-generic/bitops/non-atomic.h>

View file

@ -45,21 +45,21 @@ static int __init user_debug_setup(char *str)
__setup("user_debug=", user_debug_setup);
#endif
static void dump_mem(const char *str, unsigned long bottom, unsigned long top);
static void dump_mem(const char *, const char *, unsigned long, unsigned long);
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
printk("[<%08lx>] ", where);
print_symbol("(%s) ", where);
printk("from [<%08lx>] ", from);
print_symbol("(%s)\n", from);
char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
sprint_symbol(sym1, where);
sprint_symbol(sym2, from);
printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
if (in_exception_text(where))
dump_mem("Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
#ifndef CONFIG_ARM_UNWIND
@ -81,9 +81,10 @@ static int verify_stack(unsigned long sp)
/*
* Dump out the contents of some memory nicely...
*/
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
unsigned long top)
{
unsigned long p = bottom & ~31;
unsigned long first;
mm_segment_t fs;
int i;
@ -95,33 +96,37 @@ static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
fs = get_fs();
set_fs(KERNEL_DS);
printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
for (p = bottom & ~31; p < top;) {
printk("%04lx: ", p & 0xffff);
for (first = bottom & ~31; first < top; first += 32) {
unsigned long p;
char str[sizeof(" 12345678") * 8 + 1];
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
memset(str, ' ', sizeof(str));
str[sizeof(str) - 1] = '\0';
if (p < bottom || p >= top)
printk(" ");
else {
__get_user(val, (unsigned long *)p);
printk("%08x ", val);
for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
if (p >= bottom && p < top) {
unsigned long val;
if (__get_user(val, (unsigned long *)p) == 0)
sprintf(str + i * 9, " %08lx", val);
else
sprintf(str + i * 9, " ????????");
}
}
printk ("\n");
printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
}
set_fs(fs);
}
static void dump_instr(struct pt_regs *regs)
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
@ -132,7 +137,6 @@ static void dump_instr(struct pt_regs *regs)
fs = get_fs();
set_fs(KERNEL_DS);
printk("Code: ");
for (i = -4; i < 1; i++) {
unsigned int val, bad;
@ -142,13 +146,14 @@ static void dump_instr(struct pt_regs *regs)
bad = __get_user(val, &((u32 *)addr)[i]);
if (!bad)
printk(i == 0 ? "(%0*x) " : "%0*x ", width, val);
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
width, val);
else {
printk("bad PC value.");
p += sprintf(p, "bad PC value");
break;
}
}
printk("\n");
printk("%sCode: %s\n", lvl, str);
set_fs(fs);
}
@ -224,18 +229,19 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
struct task_struct *tsk = thread->task;
static int die_counter;
printk("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
str, err, ++die_counter);
sysfs_printk_last_file();
print_modules();
__show_regs(regs);
printk("Process %s (pid: %d, stack limit = 0x%p)\n",
tsk->comm, task_pid_nr(tsk), thread + 1);
printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
if (!user_mode(regs) || in_interrupt()) {
dump_mem("Stack: ", regs->ARM_sp,
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
THREAD_SIZE + (unsigned long)task_stack_page(tsk));
dump_backtrace(regs, tsk);
dump_instr(regs);
dump_instr(KERN_EMERG, regs);
}
}
@ -250,13 +256,14 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
oops_enter();
console_verbose();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
__die(str, err, thread, regs);
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
@ -264,7 +271,6 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
do_exit(SIGSEGV);
}
@ -349,7 +355,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (user_debug & UDBG_UNDEFINED) {
printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
current->comm, task_pid_nr(current), pc);
dump_instr(regs);
dump_instr(KERN_INFO, regs);
}
#endif
@ -400,7 +406,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
if (user_debug & UDBG_SYSCALL) {
printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
task_pid_nr(current), current->comm, n);
dump_instr(regs);
dump_instr(KERN_ERR, regs);
}
#endif
@ -579,7 +585,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
if (user_debug & UDBG_SYSCALL) {
printk("[%d] %s: arm syscall %d\n",
task_pid_nr(current), current->comm, no);
dump_instr(regs);
dump_instr("", regs);
if (user_mode(regs)) {
__show_regs(regs);
c_backtrace(regs->ARM_fp, processor_mode(regs));
@ -656,7 +662,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
if (user_debug & UDBG_BADABORT) {
printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
task_pid_nr(current), current->comm, code, instr);
dump_instr(regs);
dump_instr(KERN_ERR, regs);
show_pte(current->mm, addr);
}
#endif

View file

@ -271,12 +271,12 @@ static struct irqaction bcmring_timer_irq = {
.handler = bcmring_timer_interrupt,
};
static cycle_t bcmring_get_cycles_timer1(void)
static cycle_t bcmring_get_cycles_timer1(struct clocksource *cs)
{
return ~readl(TIMER1_VA_BASE + TIMER_VALUE);
}
static cycle_t bcmring_get_cycles_timer3(void)
static cycle_t bcmring_get_cycles_timer3(struct clocksource *cs)
{
return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
}

View file

@ -29,7 +29,7 @@ static inline void arch_idle(void)
cpu_do_idle();
}
static inline void arch_reset(char mode, char *cmd)
static inline void arch_reset(char mode, const char *cmd)
{
printk("arch_reset:%c %x\n", mode, bcmring_arch_warm_reboot);

View file

@ -17,13 +17,31 @@ config EP93XX_SDCE3_SYNC_PHYS_OFFSET
bool "0x00000000 - SDCE3/SyncBoot"
help
Select this option if you want support for EP93xx boards with the
first SDRAM bank at 0x00000000
first SDRAM bank at 0x00000000.
config EP93XX_SDCE0_PHYS_OFFSET
bool "0xc0000000 - SDCEO"
help
Select this option if you want support for EP93xx boards with the
first SDRAM bank at 0xc0000000
first SDRAM bank at 0xc0000000.
config EP93XX_SDCE1_PHYS_OFFSET
bool "0xd0000000 - SDCE1"
help
Select this option if you want support for EP93xx boards with the
first SDRAM bank at 0xd0000000.
config EP93XX_SDCE2_PHYS_OFFSET
bool "0xe0000000 - SDCE2"
help
Select this option if you want support for EP93xx boards with the
first SDRAM bank at 0xe0000000.
config EP93XX_SDCE3_ASYNC_PHYS_OFFSET
bool "0xf0000000 - SDCE3/AsyncBoot"
help
Select this option if you want support for EP93xx boards with the
first SDRAM bank at 0xf0000000.
endchoice
@ -112,28 +130,36 @@ config MACH_MICRO9
bool
config MACH_MICRO9H
bool "Support Contec Hypercontrol Micro9-H"
bool "Support Contec Micro9-High"
depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
select MACH_MICRO9
help
Say 'Y' here if you want your kernel to support the
Contec Hypercontrol Micro9-H board.
Contec Micro9-High board.
config MACH_MICRO9M
bool "Support Contec Hypercontrol Micro9-M"
depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
bool "Support Contec Micro9-Mid"
depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
select MACH_MICRO9
help
Say 'Y' here if you want your kernel to support the
Contec Hypercontrol Micro9-M board.
Contec Micro9-Mid board.
config MACH_MICRO9L
bool "Support Contec Hypercontrol Micro9-L"
bool "Support Contec Micro9-Lite"
depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
select MACH_MICRO9
help
Say 'Y' here if you want your kernel to support the
Contec Hypercontrol Micro9-L board.
Contec Micro9-Lite board.
config MACH_MICRO9S
bool "Support Contec Micro9-Slim"
depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
select MACH_MICRO9
help
Say 'Y' here if you want your kernel to support the
Contec Micro9-Slim board.
config MACH_TS72XX
bool "Support Technologic Systems TS-72xx SBC"

View file

@ -3,3 +3,12 @@ params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) := 0x00000100
zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0008000
params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) := 0xc0000100
zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0008000
params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) := 0xd0000100
zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0008000
params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) := 0xe0000100
zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0008000
params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) := 0xf0000100

View file

@ -16,13 +16,16 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <mach/hardware.h>
#include <asm/clkdev.h>
#include <asm/div64.h>
#include <mach/hardware.h>
struct clk {
struct clk *parent;
unsigned long rate;
int users;
int sw_locked;
@ -39,40 +42,60 @@ static unsigned long get_uart_rate(struct clk *clk);
static int set_keytchclk_rate(struct clk *clk, unsigned long rate);
static int set_div_rate(struct clk *clk, unsigned long rate);
static struct clk clk_xtali = {
.rate = EP93XX_EXT_CLK_RATE,
};
static struct clk clk_uart1 = {
.parent = &clk_xtali,
.sw_locked = 1,
.enable_reg = EP93XX_SYSCON_DEVCFG,
.enable_mask = EP93XX_SYSCON_DEVCFG_U1EN,
.get_rate = get_uart_rate,
};
static struct clk clk_uart2 = {
.parent = &clk_xtali,
.sw_locked = 1,
.enable_reg = EP93XX_SYSCON_DEVCFG,
.enable_mask = EP93XX_SYSCON_DEVCFG_U2EN,
.get_rate = get_uart_rate,
};
static struct clk clk_uart3 = {
.parent = &clk_xtali,
.sw_locked = 1,
.enable_reg = EP93XX_SYSCON_DEVCFG,
.enable_mask = EP93XX_SYSCON_DEVCFG_U3EN,
.get_rate = get_uart_rate,
};
static struct clk clk_pll1;
static struct clk clk_f;
static struct clk clk_h;
static struct clk clk_p;
static struct clk clk_pll2;
static struct clk clk_pll1 = {
.parent = &clk_xtali,
};
static struct clk clk_f = {
.parent = &clk_pll1,
};
static struct clk clk_h = {
.parent = &clk_pll1,
};
static struct clk clk_p = {
.parent = &clk_pll1,
};
static struct clk clk_pll2 = {
.parent = &clk_xtali,
};
static struct clk clk_usb_host = {
.parent = &clk_pll2,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_USH_EN,
};
static struct clk clk_keypad = {
.parent = &clk_xtali,
.sw_locked = 1,
.enable_reg = EP93XX_SYSCON_KEYTCHCLKDIV,
.enable_mask = EP93XX_SYSCON_KEYTCHCLKDIV_KEN,
.set_rate = set_keytchclk_rate,
};
static struct clk clk_pwm = {
.parent = &clk_xtali,
.rate = EP93XX_EXT_CLK_RATE,
};
@ -85,50 +108,62 @@ static struct clk clk_video = {
/* DMA Clocks */
static struct clk clk_m2p0 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P0,
};
static struct clk clk_m2p1 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P1,
};
static struct clk clk_m2p2 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P2,
};
static struct clk clk_m2p3 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P3,
};
static struct clk clk_m2p4 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P4,
};
static struct clk clk_m2p5 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P5,
};
static struct clk clk_m2p6 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P6,
};
static struct clk clk_m2p7 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P7,
};
static struct clk clk_m2p8 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P8,
};
static struct clk clk_m2p9 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2P9,
};
static struct clk clk_m2m0 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2M0,
};
static struct clk clk_m2m1 = {
.parent = &clk_h,
.enable_reg = EP93XX_SYSCON_PWRCNT,
.enable_mask = EP93XX_SYSCON_PWRCNT_DMA_M2M1,
};
@ -137,6 +172,7 @@ static struct clk clk_m2m1 = {
{ .dev_id = dev, .con_id = con, .clk = ck }
static struct clk_lookup clocks[] = {
INIT_CK(NULL, "xtali", &clk_xtali),
INIT_CK("apb:uart1", NULL, &clk_uart1),
INIT_CK("apb:uart2", NULL, &clk_uart2),
INIT_CK("apb:uart3", NULL, &clk_uart3),
@ -163,48 +199,84 @@ static struct clk_lookup clocks[] = {
INIT_CK(NULL, "m2m1", &clk_m2m1),
};
static DEFINE_SPINLOCK(clk_lock);
static void __clk_enable(struct clk *clk)
{
if (!clk->users++) {
if (clk->parent)
__clk_enable(clk->parent);
if (clk->enable_reg) {
u32 v;
v = __raw_readl(clk->enable_reg);
v |= clk->enable_mask;
if (clk->sw_locked)
ep93xx_syscon_swlocked_write(v, clk->enable_reg);
else
__raw_writel(v, clk->enable_reg);
}
}
}
int clk_enable(struct clk *clk)
{
if (!clk->users++ && clk->enable_reg) {
u32 value;
unsigned long flags;
value = __raw_readl(clk->enable_reg);
value |= clk->enable_mask;
if (clk->sw_locked)
ep93xx_syscon_swlocked_write(value, clk->enable_reg);
else
__raw_writel(value, clk->enable_reg);
}
if (!clk)
return -EINVAL;
spin_lock_irqsave(&clk_lock, flags);
__clk_enable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
return 0;
}
EXPORT_SYMBOL(clk_enable);
static void __clk_disable(struct clk *clk)
{
if (!--clk->users) {
if (clk->enable_reg) {
u32 v;
v = __raw_readl(clk->enable_reg);
v &= ~clk->enable_mask;
if (clk->sw_locked)
ep93xx_syscon_swlocked_write(v, clk->enable_reg);
else
__raw_writel(v, clk->enable_reg);
}
if (clk->parent)
__clk_disable(clk->parent);
}
}
void clk_disable(struct clk *clk)
{
if (!--clk->users && clk->enable_reg) {
u32 value;
unsigned long flags;
value = __raw_readl(clk->enable_reg);
value &= ~clk->enable_mask;
if (clk->sw_locked)
ep93xx_syscon_swlocked_write(value, clk->enable_reg);
else
__raw_writel(value, clk->enable_reg);
}
if (!clk)
return;
spin_lock_irqsave(&clk_lock, flags);
__clk_disable(clk);
spin_unlock_irqrestore(&clk_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
static unsigned long get_uart_rate(struct clk *clk)
{
unsigned long rate = clk_get_rate(clk->parent);
u32 value;
value = __raw_readl(EP93XX_SYSCON_PWRCNT);
if (value & EP93XX_SYSCON_PWRCNT_UARTBAUD)
return EP93XX_EXT_CLK_RATE;
return rate;
else
return EP93XX_EXT_CLK_RATE / 2;
return rate / 2;
}
unsigned long clk_get_rate(struct clk *clk)
@ -244,16 +316,16 @@ static int set_keytchclk_rate(struct clk *clk, unsigned long rate)
return 0;
}
static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
int *pdiv, int *div)
static int calc_clk_div(struct clk *clk, unsigned long rate,
int *psel, int *esel, int *pdiv, int *div)
{
unsigned long max_rate, best_rate = 0,
actual_rate = 0, mclk_rate = 0, rate_err = -1;
struct clk *mclk;
unsigned long max_rate, actual_rate, mclk_rate, rate_err = -1;
int i, found = 0, __div = 0, __pdiv = 0;
/* Don't exceed the maximum rate */
max_rate = max(max(clk_pll1.rate / 4, clk_pll2.rate / 4),
(unsigned long)EP93XX_EXT_CLK_RATE / 4);
clk_xtali.rate / 4);
rate = min(rate, max_rate);
/*
@ -267,11 +339,12 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
*/
for (i = 0; i < 3; i++) {
if (i == 0)
mclk_rate = EP93XX_EXT_CLK_RATE * 2;
mclk = &clk_xtali;
else if (i == 1)
mclk_rate = clk_pll1.rate * 2;
else if (i == 2)
mclk_rate = clk_pll2.rate * 2;
mclk = &clk_pll1;
else
mclk = &clk_pll2;
mclk_rate = mclk->rate * 2;
/* Try each predivider value */
for (__pdiv = 4; __pdiv <= 6; __pdiv++) {
@ -286,7 +359,8 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
*div = __div;
*psel = (i == 2);
*esel = (i != 0);
best_rate = actual_rate;
clk->parent = mclk;
clk->rate = actual_rate;
rate_err = abs(actual_rate - rate);
found = 1;
}
@ -294,21 +368,19 @@ static unsigned long calc_clk_div(unsigned long rate, int *psel, int *esel,
}
if (!found)
return 0;
return -EINVAL;
return best_rate;
return 0;
}
static int set_div_rate(struct clk *clk, unsigned long rate)
{
unsigned long actual_rate;
int psel = 0, esel = 0, pdiv = 0, div = 0;
int err, psel = 0, esel = 0, pdiv = 0, div = 0;
u32 val;
actual_rate = calc_clk_div(rate, &psel, &esel, &pdiv, &div);
if (actual_rate == 0)
return -EINVAL;
clk->rate = actual_rate;
err = calc_clk_div(clk, rate, &psel, &esel, &pdiv, &div);
if (err)
return err;
/* Clear the esel, psel, pdiv and div bits */
val = __raw_readl(clk->enable_reg);
@ -344,7 +416,7 @@ static unsigned long calc_pll_rate(u32 config_word)
unsigned long long rate;
int i;
rate = EP93XX_EXT_CLK_RATE;
rate = clk_xtali.rate;
rate *= ((config_word >> 11) & 0x1f) + 1; /* X1FBD */
rate *= ((config_word >> 5) & 0x3f) + 1; /* X2FBD */
do_div(rate, (config_word & 0x1f) + 1); /* X2IPD */
@ -377,7 +449,7 @@ static int __init ep93xx_clock_init(void)
value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1);
if (!(value & 0x00800000)) { /* PLL1 bypassed? */
clk_pll1.rate = EP93XX_EXT_CLK_RATE;
clk_pll1.rate = clk_xtali.rate;
} else {
clk_pll1.rate = calc_pll_rate(value);
}
@ -388,7 +460,7 @@ static int __init ep93xx_clock_init(void)
value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2);
if (!(value & 0x00080000)) { /* PLL2 bypassed? */
clk_pll2.rate = EP93XX_EXT_CLK_RATE;
clk_pll2.rate = clk_xtali.rate;
} else if (value & 0x00040000) { /* PLL2 enabled? */
clk_pll2.rate = calc_pll_rate(value);
} else {

View file

@ -550,13 +550,11 @@ void __init ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr)
platform_device_register(&ep93xx_eth_device);
}
static struct i2c_gpio_platform_data ep93xx_i2c_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
.scl_is_open_drain = 0,
.udelay = 2,
};
/*************************************************************************
* EP93xx i2c peripheral handling
*************************************************************************/
static struct i2c_gpio_platform_data ep93xx_i2c_data;
static struct platform_device ep93xx_i2c_device = {
.name = "i2c-gpio",
@ -564,8 +562,25 @@ static struct platform_device ep93xx_i2c_device = {
.dev.platform_data = &ep93xx_i2c_data,
};
void __init ep93xx_register_i2c(struct i2c_board_info *devices, int num)
void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
struct i2c_board_info *devices, int num)
{
/*
* Set the EEPROM interface pin drive type control.
* Defines the driver type for the EECLK and EEDAT pins as either
* open drain, which will require an external pull-up, or a normal
* CMOS driver.
*/
if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT)
pr_warning("ep93xx: sda != EEDAT, open drain has no effect\n");
if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK)
pr_warning("ep93xx: scl != EECLK, open drain has no effect\n");
__raw_writel((data->sda_is_open_drain << 1) |
(data->scl_is_open_drain << 0),
EP93XX_GPIO_EEDRIVE);
ep93xx_i2c_data = *data;
i2c_register_board_info(0, devices, num);
platform_device_register(&ep93xx_i2c_device);
}

View file

@ -27,8 +27,10 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <mach/hardware.h>
@ -76,13 +78,26 @@ static struct ep93xx_eth_data edb93xx_eth_data = {
.phy_id = 1,
};
static struct i2c_board_info __initdata edb93xxa_i2c_data[] = {
/*************************************************************************
* EDB93xx i2c peripheral handling
*************************************************************************/
static struct i2c_gpio_platform_data edb93xx_i2c_gpio_data = {
.sda_pin = EP93XX_GPIO_LINE_EEDAT,
.sda_is_open_drain = 0,
.scl_pin = EP93XX_GPIO_LINE_EECLK,
.scl_is_open_drain = 0,
.udelay = 0, /* default to 100 kHz */
.timeout = 0, /* default to 100 ms */
};
static struct i2c_board_info __initdata edb93xxa_i2c_board_info[] = {
{
I2C_BOARD_INFO("isl1208", 0x6f),
},
};
static struct i2c_board_info __initdata edb93xx_i2c_data[] = {
static struct i2c_board_info __initdata edb93xx_i2c_board_info[] = {
{
I2C_BOARD_INFO("ds1337", 0x68),
},
@ -92,12 +107,14 @@ static void __init edb93xx_register_i2c(void)
{
if (machine_is_edb9302a() || machine_is_edb9307a() ||
machine_is_edb9315a()) {
ep93xx_register_i2c(edb93xxa_i2c_data,
ARRAY_SIZE(edb93xxa_i2c_data));
ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
edb93xxa_i2c_board_info,
ARRAY_SIZE(edb93xxa_i2c_board_info));
} else if (machine_is_edb9307() || machine_is_edb9312() ||
machine_is_edb9315()) {
ep93xx_register_i2c(edb93xx_i2c_data,
ARRAY_SIZE(edb93xx_i2c_data));
ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
edb93xx_i2c_board_info,
ARRAY_SIZE(edb93xx_i2c_board_info));
}
}

View file

@ -52,25 +52,27 @@
#define EP93XX_AHB_VIRT_BASE 0xfef00000
#define EP93XX_AHB_SIZE 0x00100000
#define EP93XX_AHB_PHYS(x) (EP93XX_AHB_PHYS_BASE + (x))
#define EP93XX_AHB_IOMEM(x) IOMEM(EP93XX_AHB_VIRT_BASE + (x))
#define EP93XX_APB_PHYS_BASE 0x80800000
#define EP93XX_APB_VIRT_BASE 0xfed00000
#define EP93XX_APB_SIZE 0x00200000
#define EP93XX_APB_PHYS(x) (EP93XX_APB_PHYS_BASE + (x))
#define EP93XX_APB_IOMEM(x) IOMEM(EP93XX_APB_VIRT_BASE + (x))
/* AHB peripherals */
#define EP93XX_DMA_BASE EP93XX_AHB_IOMEM(0x00000000)
#define EP93XX_ETHERNET_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00010000)
#define EP93XX_ETHERNET_PHYS_BASE EP93XX_AHB_PHYS(0x00010000)
#define EP93XX_ETHERNET_BASE EP93XX_AHB_IOMEM(0x00010000)
#define EP93XX_USB_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00020000)
#define EP93XX_USB_PHYS_BASE EP93XX_AHB_PHYS(0x00020000)
#define EP93XX_USB_BASE EP93XX_AHB_IOMEM(0x00020000)
#define EP93XX_RASTER_PHYS_BASE (EP93XX_AHB_PHYS_BASE + 0x00030000)
#define EP93XX_RASTER_PHYS_BASE EP93XX_AHB_PHYS(0x00030000)
#define EP93XX_RASTER_BASE EP93XX_AHB_IOMEM(0x00030000)
#define EP93XX_GRAPHICS_ACCEL_BASE EP93XX_AHB_IOMEM(0x00040000)
@ -112,21 +114,10 @@
#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000)
#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x))
#define EP93XX_GPIO_F_INT_TYPE1 EP93XX_GPIO_REG(0x4c)
#define EP93XX_GPIO_F_INT_TYPE2 EP93XX_GPIO_REG(0x50)
#define EP93XX_GPIO_F_INT_ACK EP93XX_GPIO_REG(0x54)
#define EP93XX_GPIO_F_INT_ENABLE EP93XX_GPIO_REG(0x58)
#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c)
#define EP93XX_GPIO_A_INT_TYPE1 EP93XX_GPIO_REG(0x90)
#define EP93XX_GPIO_A_INT_TYPE2 EP93XX_GPIO_REG(0x94)
#define EP93XX_GPIO_A_INT_ACK EP93XX_GPIO_REG(0x98)
#define EP93XX_GPIO_A_INT_ENABLE EP93XX_GPIO_REG(0x9c)
#define EP93XX_GPIO_A_INT_STATUS EP93XX_GPIO_REG(0xa0)
#define EP93XX_GPIO_B_INT_TYPE1 EP93XX_GPIO_REG(0xac)
#define EP93XX_GPIO_B_INT_TYPE2 EP93XX_GPIO_REG(0xb0)
#define EP93XX_GPIO_B_INT_ACK EP93XX_GPIO_REG(0xb4)
#define EP93XX_GPIO_B_INT_ENABLE EP93XX_GPIO_REG(0xb8)
#define EP93XX_GPIO_B_INT_STATUS EP93XX_GPIO_REG(0xbc)
#define EP93XX_GPIO_EEDRIVE EP93XX_GPIO_REG(0xc8)
#define EP93XX_AAC_BASE EP93XX_APB_IOMEM(0x00080000)
@ -134,13 +125,13 @@
#define EP93XX_IRDA_BASE EP93XX_APB_IOMEM(0x000b0000)
#define EP93XX_UART1_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000c0000)
#define EP93XX_UART1_PHYS_BASE EP93XX_APB_PHYS(0x000c0000)
#define EP93XX_UART1_BASE EP93XX_APB_IOMEM(0x000c0000)
#define EP93XX_UART2_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000d0000)
#define EP93XX_UART2_PHYS_BASE EP93XX_APB_PHYS(0x000d0000)
#define EP93XX_UART2_BASE EP93XX_APB_IOMEM(0x000d0000)
#define EP93XX_UART3_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x000e0000)
#define EP93XX_UART3_PHYS_BASE EP93XX_APB_PHYS(0x000e0000)
#define EP93XX_UART3_BASE EP93XX_APB_IOMEM(0x000e0000)
#define EP93XX_KEY_MATRIX_BASE EP93XX_APB_IOMEM(0x000f0000)
@ -148,10 +139,10 @@
#define EP93XX_ADC_BASE EP93XX_APB_IOMEM(0x00100000)
#define EP93XX_TOUCHSCREEN_BASE EP93XX_APB_IOMEM(0x00100000)
#define EP93XX_PWM_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x00110000)
#define EP93XX_PWM_PHYS_BASE EP93XX_APB_PHYS(0x00110000)
#define EP93XX_PWM_BASE EP93XX_APB_IOMEM(0x00110000)
#define EP93XX_RTC_PHYS_BASE (EP93XX_APB_PHYS_BASE + 0x00120000)
#define EP93XX_RTC_PHYS_BASE EP93XX_APB_PHYS(0x00120000)
#define EP93XX_RTC_BASE EP93XX_APB_IOMEM(0x00120000)
#define EP93XX_SYSCON_BASE EP93XX_APB_IOMEM(0x00130000)
@ -218,6 +209,17 @@
#define EP93XX_SYSCON_KEYTCHCLKDIV_ADIV (1<<16)
#define EP93XX_SYSCON_KEYTCHCLKDIV_KEN (1<<15)
#define EP93XX_SYSCON_KEYTCHCLKDIV_KDIV (1<<0)
#define EP93XX_SYSCON_SYSCFG EP93XX_SYSCON_REG(0x9c)
#define EP93XX_SYSCON_SYSCFG_REV_MASK (0xf0000000)
#define EP93XX_SYSCON_SYSCFG_REV_SHIFT (28)
#define EP93XX_SYSCON_SYSCFG_SBOOT (1<<8)
#define EP93XX_SYSCON_SYSCFG_LCSN7 (1<<7)
#define EP93XX_SYSCON_SYSCFG_LCSN6 (1<<6)
#define EP93XX_SYSCON_SYSCFG_LASDO (1<<5)
#define EP93XX_SYSCON_SYSCFG_LEEDA (1<<4)
#define EP93XX_SYSCON_SYSCFG_LEECLK (1<<3)
#define EP93XX_SYSCON_SYSCFG_LCSN2 (1<<1)
#define EP93XX_SYSCON_SYSCFG_LCSN1 (1<<0)
#define EP93XX_SYSCON_SWLOCK EP93XX_SYSCON_REG(0xc0)
#define EP93XX_WATCHDOG_BASE EP93XX_APB_IOMEM(0x00140000)

View file

@ -114,17 +114,9 @@ extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
B0..B7 (8..15) to irq 72..79, and
F0..F7 (16..23) to irq 80..87.
*/
static inline int gpio_to_irq(unsigned gpio)
{
if (gpio <= EP93XX_GPIO_LINE_MAX_IRQ)
return 64 + gpio;
#define gpio_to_irq(gpio) \
(((gpio) <= EP93XX_GPIO_LINE_MAX_IRQ) ? (64 + (gpio)) : -EINVAL)
return -EINVAL;
}
static inline int irq_to_gpio(unsigned irq)
{
return irq - gpio_to_irq(0);
}
#define irq_to_gpio(irq) ((irq) - gpio_to_irq(0))
#endif
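
A hedged usage sketch (the handler, device name and trigger flag are
illustrative only, not from this header): with the mapping above, a driver
that needs the interrupt for an EP93xx GPIO line would do something like

  int irq = gpio_to_irq(gpio);  /* e.g. B0 (gpio 8) maps to irq 72 */
  if (irq < 0)
          return irq;           /* no IRQ: gpio > EP93XX_GPIO_LINE_MAX_IRQ */
  err = request_irq(irq, my_handler, IRQF_TRIGGER_FALLING, "mydev", dev);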

View file

@ -9,6 +9,12 @@
#define PHYS_OFFSET UL(0x00000000)
#elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xc0000000)
#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xd0000000)
#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xe0000000)
#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xf0000000)
#else
#error "Kconfig bug: No EP93xx PHYS_OFFSET set"
#endif

View file

@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
struct i2c_gpio_platform_data;
struct i2c_board_info;
struct platform_device;
struct ep93xxfb_mach_info;
@ -33,7 +34,8 @@ static inline void ep93xx_devcfg_clear_bits(unsigned int bits)
}
void ep93xx_register_eth(struct ep93xx_eth_data *data, int copy_addr);
void ep93xx_register_i2c(struct i2c_board_info *devices, int num);
void ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
struct i2c_board_info *devices, int num);
void ep93xx_register_fb(struct ep93xxfb_mach_info *data);
void ep93xx_register_pwm(int pwm0, int pwm1);
int ep93xx_pwm_acquire_gpio(struct platform_device *pdev);

View file

@ -2,7 +2,9 @@
* linux/arch/arm/mach-ep93xx/micro9.c
*
* Copyright (C) 2006 Contec Steuerungstechnik & Automation GmbH
* Manfred Gruber <manfred.gruber@contec.at>
* Manfred Gruber <m.gruber@tirol.com>
* Copyright (C) 2009 Contec Steuerungstechnik & Automation GmbH
* Hubert Feurstein <hubert.feurstein@contec.at>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -20,104 +22,124 @@
#include <asm/mach/arch.h>
static struct ep93xx_eth_data micro9_eth_data = {
.phy_id = 0x1f,
};
/*************************************************************************
* Micro9 NOR Flash
*
* Micro9-High has up to 64MB of 32-bit flash on CS1
* Micro9-Mid has up to 64MB of either 32-bit or 16-bit flash on CS1
Micro9-Lite uses a separate MTD map driver for flash support
* Micro9-Slim has up to 64MB of either 32-bit or 16-bit flash on CS1
*************************************************************************/
static struct physmap_flash_data micro9_flash_data;
static void __init micro9_init(void)
{
ep93xx_register_eth(&micro9_eth_data, 1);
}
/*
* Micro9-H
*/
#ifdef CONFIG_MACH_MICRO9H
static struct physmap_flash_data micro9h_flash_data = {
.width = 4,
};
static struct resource micro9h_flash_resource = {
static struct resource micro9_flash_resource = {
.start = EP93XX_CS1_PHYS_BASE,
.end = EP93XX_CS1_PHYS_BASE + SZ_64M - 1,
.flags = IORESOURCE_MEM,
};
static struct platform_device micro9h_flash = {
static struct platform_device micro9_flash = {
.name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &micro9h_flash_data,
.platform_data = &micro9_flash_data,
},
.num_resources = 1,
.resource = &micro9h_flash_resource,
.resource = &micro9_flash_resource,
};
static void __init micro9h_init(void)
static void __init __micro9_register_flash(unsigned int width)
{
platform_device_register(&micro9h_flash);
micro9_flash_data.width = width;
platform_device_register(&micro9_flash);
}
static void __init micro9h_init_machine(void)
static unsigned int __init micro9_detect_bootwidth(void)
{
u32 v;
/* Detect the bus width of the external flash memory */
v = __raw_readl(EP93XX_SYSCON_SYSCFG);
if (v & EP93XX_SYSCON_SYSCFG_LCSN7)
return 4; /* 32-bit */
else
return 2; /* 16-bit */
}
static void __init micro9_register_flash(void)
{
if (machine_is_micro9())
__micro9_register_flash(4);
else if (machine_is_micro9m() || machine_is_micro9s())
__micro9_register_flash(micro9_detect_bootwidth());
}
/*************************************************************************
* Micro9 Ethernet
*************************************************************************/
static struct ep93xx_eth_data micro9_eth_data = {
.phy_id = 0x1f,
};
static void __init micro9_init_machine(void)
{
ep93xx_init_devices();
micro9_init();
micro9h_init();
ep93xx_register_eth(&micro9_eth_data, 1);
micro9_register_flash();
}
MACHINE_START(MICRO9, "Contec Hypercontrol Micro9-H")
/* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
#ifdef CONFIG_MACH_MICRO9H
MACHINE_START(MICRO9, "Contec Micro9-High")
/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
.phys_io = EP93XX_APB_PHYS_BASE,
.io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
.boot_params = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.timer = &ep93xx_timer,
.init_machine = micro9h_init_machine,
.init_machine = micro9_init_machine,
MACHINE_END
#endif
/*
* Micro9-M
*/
#ifdef CONFIG_MACH_MICRO9M
static void __init micro9m_init_machine(void)
{
ep93xx_init_devices();
micro9_init();
}
MACHINE_START(MICRO9M, "Contec Hypercontrol Micro9-M")
/* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
MACHINE_START(MICRO9M, "Contec Micro9-Mid")
/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
.phys_io = EP93XX_APB_PHYS_BASE,
.io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
.boot_params = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
.boot_params = EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.timer = &ep93xx_timer,
.init_machine = micro9m_init_machine,
.init_machine = micro9_init_machine,
MACHINE_END
#endif
/*
* Micro9-L
*/
#ifdef CONFIG_MACH_MICRO9L
static void __init micro9l_init_machine(void)
{
ep93xx_init_devices();
micro9_init();
}
MACHINE_START(MICRO9L, "Contec Hypercontrol Micro9-L")
/* Maintainer: Manfred Gruber <manfred.gruber@contec.at> */
MACHINE_START(MICRO9L, "Contec Micro9-Lite")
/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
.phys_io = EP93XX_APB_PHYS_BASE,
.io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
.boot_params = EP93XX_SDCE3_PHYS_BASE_SYNC + 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.timer = &ep93xx_timer,
.init_machine = micro9l_init_machine,
.init_machine = micro9_init_machine,
MACHINE_END
#endif
#ifdef CONFIG_MACH_MICRO9S
MACHINE_START(MICRO9S, "Contec Micro9-Slim")
/* Maintainer: Hubert Feurstein <hubert.feurstein@contec.at> */
.phys_io = EP93XX_APB_PHYS_BASE,
.io_pg_offst = ((EP93XX_APB_VIRT_BASE) >> 18) & 0xfffc,
.boot_params = EP93XX_SDCE3_PHYS_BASE_ASYNC + 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
MACHINE_END
#endif

View file

@ -155,7 +155,7 @@ MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table
static pxa_freqs_t pxa27x_freqs[] = {
{104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 },
{156000, 104000, PXA27x_CCCR(1, 8, 6), 0, CCLKCFG2(1, 1, 1), 1000000, 1705000 },
{156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 },
{208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 },
{312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 },
{416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 },

View file

@ -238,7 +238,7 @@ static struct resource csb726_lan_resources[] = {
};
struct smsc911x_platform_config csb726_lan_config = {
.irq_type = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
.flags = SMSC911X_USE_32BIT,
.phy_interface = PHY_INTERFACE_MODE_MII,

View file

@ -25,6 +25,7 @@ led-$(CONFIG_SA1100_CERF) += leds-cerf.o
obj-$(CONFIG_SA1100_COLLIE) += collie.o
obj-$(CONFIG_SA1100_H3100) += h3600.o
obj-$(CONFIG_SA1100_H3600) += h3600.o
obj-$(CONFIG_SA1100_HACKKIT) += hackkit.o

View file

@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include "proc-macros.S"
@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range)
* - the Icache does not read data from the write buffer
*/
ENTRY(v6_coherent_user_range)
UNWIND(.fnstart )
#ifdef HARVARD_CACHE
bic r0, r0, #CACHE_LINE_SIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D line
1:
USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line
add r0, r0, #CACHE_LINE_SIZE
2:
cmp r0, r1
blo 1b
#endif
@ -142,6 +145,19 @@ ENTRY(v6_coherent_user_range)
#endif
mov pc, lr
/*
* Fault handling for the cache operation above. If the virtual address in r0
* isn't mapped, just try the next page.
*/
9001:
mov r0, r0, lsr #12
mov r0, r0, lsl #12
add r0, r0, #4096
b 2b
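@ net effect of the three instructions above: r0 = (r0 & PAGE_MASK) + PAGE_SIZE,
@ then resume the cache loop at the address check (label 2)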
UNWIND(.fnend )
ENDPROC(v6_coherent_user_range)
ENDPROC(v6_coherent_kern_range)
/*
* v6_flush_kern_dcache_page(kaddr)
*

View file

@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/unwind.h>
#include "proc-macros.S"
@ -153,13 +154,16 @@ ENTRY(v7_coherent_kern_range)
* - the Icache does not read data from the write buffer
*/
ENTRY(v7_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
1: mcr p15, 0, r0, c7, c11, 1 @ clean D line to the point of unification
1:
USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification
dsb
mcr p15, 0, r0, c7, c5, 1 @ invalidate I line
USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line
add r0, r0, r2
2:
cmp r0, r1
blo 1b
mov r0, #0
@ -167,6 +171,17 @@ ENTRY(v7_coherent_user_range)
dsb
isb
mov pc, lr
/*
* Fault handling for the cache operation above. If the virtual address in r0
* isn't mapped, just try the next page.
*/
9001:
mov r0, r0, lsr #12
mov r0, r0, lsl #12
add r0, r0, #4096
b 2b
UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)

View file

@ -153,14 +153,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
page = pfn_to_page(pfn);
mapping = page_mapping(page);
if (mapping) {
#ifndef CONFIG_SMP
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty)
__flush_dcache_page(mapping, page);
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(mapping, page);
#endif
if (mapping) {
if (cache_is_vivt())
make_coherent(mapping, vma, addr, pfn);
else if (vma->vm_flags & VM_EXEC)

View file

@ -292,6 +292,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
* down_read()
*/
might_sleep();
#ifdef CONFIG_DEBUG_VM
if (!user_mode(regs) &&
!search_exception_tables(regs->ARM_pc))
goto no_context;
#endif
}
fault = __do_page_fault(mm, addr, fsr, tsk);

View file

@ -46,6 +46,8 @@ void *kmap_atomic(struct page *page, enum km_type type)
if (!PageHighMem(page))
return page_address(page);
debug_kmap_atomic(type);
kmap = kmap_high_get(page);
if (kmap)
return kmap;

View file

@ -483,7 +483,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn);
start_pg = pfn_to_page(start_pfn - 1) + 1;
end_pg = pfn_to_page(end_pfn);
/*

View file

@ -438,7 +438,7 @@ static int diag204_probe(void)
}
if (diag204((unsigned long)SUBC_STIB6 |
(unsigned long)INFO_EXT, pages, buf) >= 0) {
diag204_store_sc = SUBC_STIB7;
diag204_store_sc = SUBC_STIB6;
diag204_info_type = INFO_EXT;
goto out;
}

View file

@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void)
static int show_cpuinfo(struct seq_file *m, void *v)
{
static const char *hwcap_str[9] = {
static const char *hwcap_str[10] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh"
"edat", "etf3eh", "highgprs"
};
struct _lowcore *lc;
unsigned long n = (unsigned long) v - 1;
@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
num_online_cpus(), loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ))%100);
seq_puts(m, "features\t: ");
for (i = 0; i < 9; i++)
for (i = 0; i < 10; i++)
if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
seq_printf(m, "%s ", hwcap_str[i]);
seq_puts(m, "\n");

View file

@ -121,7 +121,7 @@ noresched:
ENTRY(resume_userspace)
! r8: current_thread_info
cli
TRACE_IRQS_OfF
TRACE_IRQS_OFF
mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags
tst #(_TIF_WORK_MASK & 0xff), r0
bt/s __restore_all

View file

@ -291,31 +291,48 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
return syscalls_metadata[nr];
}
void arch_init_ftrace_syscalls(void)
int syscall_name_to_nr(char *name)
{
int i;
if (!syscalls_metadata)
return -1;
for (i = 0; i < NR_syscalls; i++)
if (syscalls_metadata[i])
if (!strcmp(syscalls_metadata[i]->name, name))
return i;
return -1;
}
void set_syscall_enter_id(int num, int id)
{
syscalls_metadata[num]->enter_id = id;
}
void set_syscall_exit_id(int num, int id)
{
syscalls_metadata[num]->exit_id = id;
}
static int __init arch_init_ftrace_syscalls(void)
{
int i;
struct syscall_metadata *meta;
unsigned long **psys_syscall_table = &sys_call_table;
static atomic_t refs;
if (atomic_inc_return(&refs) != 1)
goto end;
syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
FTRACE_SYSCALL_MAX, GFP_KERNEL);
if (!syscalls_metadata) {
WARN_ON(1);
return;
return -ENOMEM;
}
for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
meta = find_syscall_meta(psys_syscall_table[i]);
syscalls_metadata[i] = meta;
}
return;
/* Paranoid: avoid overflow */
end:
atomic_dec(&refs);
return 0;
}
arch_initcall(arch_init_ftrace_syscalls);
#endif /* CONFIG_FTRACE_SYSCALLS */

View file

@ -549,6 +549,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu == 0)
seq_printf(m, "machine\t\t: %s\n", get_system_type());
else
seq_printf(m, "\n");
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);

View file

@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
{
struct task_struct *tsk = current;
if (!(current_cpu_data.flags & CPU_HAS_FPU))
if (!(boot_cpu_data.flags & CPU_HAS_FPU))
return 0;
set_used_math();
@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
{
struct task_struct *tsk = current;
if (!(current_cpu_data.flags & CPU_HAS_FPU))
if (!(boot_cpu_data.flags & CPU_HAS_FPU))
return 0;
if (!used_math()) {
@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
#undef COPY
#ifdef CONFIG_SH_FPU
if (current_cpu_data.flags & CPU_HAS_FPU) {
if (boot_cpu_data.flags & CPU_HAS_FPU) {
int owned_fp;
struct task_struct *tsk = current;
@ -472,6 +472,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(OR_R0_R0, &frame->retcode[6]);
err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
regs->pr = (unsigned long) frame->retcode;
flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
}
if (err)
@ -497,8 +498,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
return 0;
give_sigsegv:

View file

@ -35,6 +35,8 @@ static inline void __init smp_store_cpu_info(unsigned int cpu)
{
struct sh_cpuinfo *c = cpu_data + cpu;
memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
c->loops_per_jiffy = loops_per_jiffy;
}

View file

@ -25,6 +25,7 @@
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/proc_fs.h>
#include <linux/sysfs.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>
@ -159,12 +160,12 @@ void die(const char * str, struct pt_regs * regs, long err)
oops_enter();
console_verbose();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
sysfs_printk_last_file();
print_modules();
show_regs(regs);
@ -180,6 +181,7 @@ void die(const char * str, struct pt_regs * regs, long err)
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
oops_exit();
if (kexec_should_crash(current))
crash_kexec(regs);
@ -190,7 +192,6 @@ void die(const char * str, struct pt_regs * regs, long err)
if (panic_on_oops)
panic("Fatal exception");
oops_exit();
do_exit(SIGSEGV);
}

View file

@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma,
return;
page = pfn_to_page(pfn);
if (pfn_valid(pfn) && page_mapping(page)) {
if (pfn_valid(pfn)) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
if (dirty) {
unsigned long addr = (unsigned long)page_address(page);

View file

@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
err = request_irq(lp->cfg.rx_irq, ldc_rx,
IRQF_SAMPLE_RANDOM | IRQF_SHARED,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
lp->rx_irq_name, lp);
if (err)
return err;
err = request_irq(lp->cfg.tx_irq, ldc_tx,
IRQF_SAMPLE_RANDOM | IRQF_SHARED,
IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
lp->tx_irq_name, lp);
if (err) {
free_irq(lp->cfg.rx_irq, lp);

View file

@ -437,7 +437,7 @@ static const struct sparc_pmu niagara2_pmu = {
.lower_shift = 6,
.event_mask = 0xfff,
.hv_bit = 0x8,
.irq_bit = 0x03,
.irq_bit = 0x30,
.upper_nop = 0x220,
.lower_nop = 0x220,
};

View file

@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn)
struct page *page;
page = pfn_to_page(pfn);
if (page && page_mapping(page)) {
if (page) {
unsigned long pg_flags;
pg_flags = page->flags;

View file

@ -491,7 +491,7 @@ if PARAVIRT_GUEST
source "arch/x86/xen/Kconfig"
config VMI
bool "VMI Guest support"
bool "VMI Guest support (DEPRECATED)"
select PARAVIRT
depends on X86_32
---help---
@ -500,6 +500,15 @@ config VMI
at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.
As of September 2009, VMware has started a phased retirement
of this feature from VMware's products. Please see
feature-removal-schedule.txt for details. If you are
planning to enable this option, please note that you cannot
live migrate a VMI-enabled VM to a future VMware product
that doesn't support VMI. So if you expect your kernel to
seamlessly migrate to newer VMware products, keep this
disabled.
config KVM_CLOCK
bool "KVM paravirtualized clock"
select PARAVIRT

View file

@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
static inline unsigned long __raw_local_save_flags(void)
{
unsigned long f;
asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: paravirt_type(pv_irq_ops.save_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc");
return f;
return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}
static inline void raw_local_irq_restore(unsigned long f)
{
asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: PV_FLAGS_ARG(f),
paravirt_type(pv_irq_ops.restore_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc");
PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}
static inline void raw_local_irq_disable(void)
{
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_disable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc");
PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}
static inline void raw_local_irq_enable(void)
{
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_enable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc");
PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}
static inline unsigned long __raw_local_irq_save(void)

View file

@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
__edx = __edx, __ecx = __ecx, __eax = __eax
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
VEXTRA_CLOBBERS, \
pre, post, ##__VA_ARGS__)
#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
____PVOP_VCALL(op.func, CLBR_RET_REG, \
PVOP_VCALLEE_CLOBBERS, , \
pre, post, ##__VA_ARGS__)

View file

@ -56,6 +56,6 @@ SECTIONS
/DISCARD/ : {
*(.note*)
}
. = ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");
}
ASSERT(_end <= WAKEUP_SIZE, "Wakeup too big!");

View file

@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
__func__, smp_processor_id(), vector, irq);
}
run_local_timers();
irq_exit();
set_irq_regs(old_regs);
@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs)
if (generic_interrupt_extension)
generic_interrupt_extension();
run_local_timers();
irq_exit();
set_irq_regs(old_regs);

View file

@ -311,7 +311,7 @@ void pci_iommu_shutdown(void)
amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

View file

@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
inc_irq_stat(irq_resched_count);
run_local_timers();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/

View file

@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
#ifdef CONFIG_FRAME_POINTER
return *(unsigned long *)(regs->bp + sizeof(long));
#else
unsigned long *sp = (unsigned long *)regs->sp;
unsigned long *sp =
(unsigned long *)kernel_stack_pointer(regs);
/*
* Return address is either directly at stack pointer
* or above a saved flags. Eflags has bits 22-31 zero,

View file

@ -3,8 +3,16 @@
#include <asm/trampoline.h>
#include <asm/e820.h>
#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
#define __trampinit
#define __trampinitdata
#else
#define __trampinit __cpuinit
#define __trampinitdata __cpuinitdata
#endif
/* ready for x86_64 and x86 */
unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
void __init reserve_trampoline_memory(void)
{
@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
* bootstrap into the page concerned. The caller
* has made sure it's suitably aligned.
*/
unsigned long __cpuinit setup_trampoline(void)
unsigned long __trampinit setup_trampoline(void)
{
memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
return virt_to_phys(trampoline_base);

View file

@ -32,8 +32,12 @@
#include <asm/segment.h>
#include <asm/processor-flags.h>
#ifdef CONFIG_ACPI_SLEEP
.section .rodata, "a", @progbits
#else
/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
__CPUINITRODATA
#endif
.code16
ENTRY(trampoline_data)

View file

@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)
pv_info.paravirt_enabled = 1;
pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
pv_info.name = "vmi";
pv_info.name = "vmi [deprecated]";
pv_init_ops.patch = vmi_patch;

View file

@ -305,8 +305,8 @@ SECTIONS
#ifdef CONFIG_X86_32
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
* Per-cpu symbols which need to be offset from __per_cpu_load
@ -319,12 +319,12 @@ INIT_PER_CPU(irq_stack_union);
/*
* Build-time check on the image size:
*/
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
. = ASSERT((per_cpu__irq_stack_union == 0),
"irq_stack_union is not at start of per-cpu area");
ASSERT((per_cpu__irq_stack_union == 0),
"irq_stack_union is not at start of per-cpu area");
#endif
#endif /* CONFIG_X86_32 */
@ -332,7 +332,6 @@ INIT_PER_CPU(irq_stack_union);
#ifdef CONFIG_KEXEC
#include <asm/kexec.h>
. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
"kexec control code size is too big");
ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
"kexec control code size is too big");
#endif

View file

@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
part_stat_inc(cpu, part, merges[rw]);
else {
part_round_stats(cpu, part);
part_inc_in_flight(part);
part_inc_in_flight(part, rw);
}
part_stat_unlock();
@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
if (now == part->stamp)
return;
if (part->in_flight) {
if (part_in_flight(part)) {
__part_stat_add(cpu, part, time_in_queue,
part->in_flight * (now - part->stamp));
part_in_flight(part) * (now - part->stamp));
__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
}
part->stamp = now;
@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
part_round_stats(cpu, part);
part_dec_in_flight(part);
part_dec_in_flight(part, rw);
part_stat_unlock();
}
@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *work,
unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, work, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);
int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *

View file

@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_round_stats(cpu, part);
part_dec_in_flight(part);
part_dec_in_flight(part, rq_data_dir(req));
part_stat_unlock();
}

View file

@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
/**
* blk_queue_max_discard_sectors - set max sectors for a single discard
* @q: the request queue for the device
* @max_discard: maximum number of sectors to discard
* @max_discard_sectors: maximum number of sectors to discard
**/
void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors)

View file

@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
max_depth -= 2;
if (!max_depth)
max_depth = 1;
if (q->in_flight[0] > max_depth)
if (q->in_flight[BLK_RW_ASYNC] > max_depth)
return 1;
}

View file

@ -150,7 +150,7 @@ struct cfq_data {
* idle window management
*/
struct timer_list idle_slice_timer;
struct delayed_work unplug_work;
struct work_struct unplug_work;
struct cfq_queue *active_queue;
struct cfq_io_context *active_cic;
@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
struct io_context *);
@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
}
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
int is_sync)
bool is_sync)
{
return cic->cfqq[!!is_sync];
return cic->cfqq[is_sync];
}
static inline void cic_set_cfqq(struct cfq_io_context *cic,
struct cfq_queue *cfqq, int is_sync)
struct cfq_queue *cfqq, bool is_sync)
{
cic->cfqq[!!is_sync] = cfqq;
cic->cfqq[is_sync] = cfqq;
}
/*
* We regard a request as SYNC if it's either a read or has the SYNC bit
* set (in which case it could also be direct WRITE).
*/
static inline int cfq_bio_sync(struct bio *bio)
static inline bool cfq_bio_sync(struct bio *bio)
{
if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
return 1;
return 0;
return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
}
/*
* scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing
*/
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
unsigned long delay)
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues) {
cfq_log(cfqd, "schedule dispatch");
kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
delay);
kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
}
}
@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
* if a queue is marked sync and has sync io queued. A sync queue with async
* io only should not get full sync slice length.
*/
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
unsigned short prio)
{
const int base_slice = cfqd->cfq_slice[sync];
@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* isn't valid until the first request from the dispatch is activated
* and the slice time set.
*/
static inline int cfq_slice_used(struct cfq_queue *cfqq)
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
if (cfq_cfqq_slice_new(cfqq))
return 0;
@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
* we will service the queues.
*/
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
int add_front)
bool add_front)
{
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
} else
rb_key += jiffies;
} else if (!add_front) {
/*
* Get our rb key offset. Subtract any residual slice
* value carried from last service. A negative resid
* count indicates slice overrun, and this should position
* the next service time further away in the tree.
*/
rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
rb_key += cfqq->slice_resid;
rb_key -= cfqq->slice_resid;
cfqq->slice_resid = 0;
} else
rb_key = 0;
} else {
rb_key = -HZ;
__cfqq = cfq_rb_first(&cfqd->service_tree);
rb_key += __cfqq ? __cfqq->rb_key : jiffies;
}
if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
/*
@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
n = &(*p)->rb_left;
else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
n = &(*p)->rb_right;
else if (rb_key < __cfqq->rb_key)
else if (time_before(rb_key, __cfqq->rb_key))
n = &(*p)->rb_left;
else
n = &(*p)->rb_right;
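The switch from a plain < to time_before() matters because rb_key is derived from jiffies and can wrap around; the jiffies helpers reduce to a signed-difference test, roughly as follows (a sketch of the usual pattern, not a verbatim quote of jiffies.h):

#define time_after(a, b)   ((long)((b) - (a)) < 0)   /* wrap-safe */
#define time_before(a, b)  time_after(b, a)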
@ -827,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
time_before(next->start_time, rq->start_time))
time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
list_move(&rq->queuelist, &next->queuelist);
rq_set_fifo_time(rq, rq_fifo_time(next));
}
cfq_remove_request(next);
}
@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
* Disallow merge of a sync bio into an async request.
*/
if (cfq_bio_sync(bio) && !rq_is_sync(rq))
return 0;
return false;
/*
* Lookup the cfqq that this bio will be queued with. Allow
@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
*/
cic = cfq_cic_lookup(cfqd, current->io_context);
if (!cic)
return 0;
return false;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
if (cfqq == RQ_CFQQ(rq))
return 1;
return 0;
return cfqq == RQ_CFQQ(rq);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
*/
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
int timed_out)
bool timed_out)
{
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
}
static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
struct cfq_queue *cfqq = cfqd->active_queue;
@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
*/
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
struct cfq_queue *cur_cfqq,
int probe)
bool probe)
{
struct cfq_queue *cfqq;
@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
if (!cic || !atomic_read(&cic->ioc->nr_tasks))
return;
/*
* If our average think time is larger than the remaining time
* slice, then don't idle. This avoids overrunning the allotted
* time slice.
*/
if (sample_valid(cic->ttime_samples) &&
(cfqq->slice_end - jiffies < cic->ttime_mean))
return;
cfq_mark_cfqq_wait_request(cfqq);
/*
@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
*/
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
struct cfq_data *cfqd = cfqq->cfqd;
struct request *rq;
int fifo;
struct request *rq = NULL;
if (cfq_cfqq_fifo_expire(cfqq))
return NULL;
@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
if (list_empty(&cfqq->fifo))
return NULL;
fifo = cfq_cfqq_sync(cfqq);
rq = rq_entry_fifo(cfqq->fifo.next);
if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
if (time_before(jiffies, rq_fifo_time(rq)))
rq = NULL;
cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
return rq;
}
@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}
/*
* Dispatch a request from cfqq, moving them to the request queue
* dispatch list.
*/
static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct request *rq;
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
/*
* follow expired path, else get first next available
*/
rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;
/*
* insert request into driver dispatch list
*/
cfq_dispatch_insert(cfqd->queue, rq);
if (!cfqd->active_cic) {
struct cfq_io_context *cic = RQ_CIC(rq);
atomic_long_inc(&cic->ioc->refcount);
cfqd->active_cic = cic;
}
}
/*
* Find the cfqq that we need to service and move a request from that to the
* dispatch list
*/
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
unsigned int max_dispatch;
if (!cfqd->busy_queues)
return 0;
if (unlikely(force))
return cfq_forced_dispatch(cfqd);
cfqq = cfq_select_queue(cfqd);
if (!cfqq)
return 0;
/*
* Drain async requests before we start sync IO
*/
if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
return 0;
return false;
/*
* If this is an async queue and we have sync IO in flight, let it wait
*/
if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
return 0;
return false;
max_dispatch = cfqd->cfq_quantum;
if (cfq_class_idle(cfqq))
@ -1322,13 +1284,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
* idle queue must always only have a single IO in flight
*/
if (cfq_class_idle(cfqq))
return 0;
return false;
/*
* We have other queues, don't allow more IO from this one
*/
if (cfqd->busy_queues > 1)
return 0;
return false;
/*
* Sole queue user, allow bigger slice
@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
max_dispatch = depth;
}
if (cfqq->dispatched >= max_dispatch)
/*
* If we're below the current max, allow a dispatch
*/
return cfqq->dispatched < max_dispatch;
}
/*
* Dispatch a request from cfqq, moving them to the request queue
* dispatch list.
*/
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct request *rq;
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
if (!cfq_may_dispatch(cfqd, cfqq))
return false;
/*
* follow expired path, else get first next available
*/
rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;
/*
* insert request into driver dispatch list
*/
cfq_dispatch_insert(cfqd->queue, rq);
if (!cfqd->active_cic) {
struct cfq_io_context *cic = RQ_CIC(rq);
atomic_long_inc(&cic->ioc->refcount);
cfqd->active_cic = cic;
}
return true;
}
/*
* Find the cfqq that we need to service and move a request from that to the
* dispatch list
*/
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
if (!cfqd->busy_queues)
return 0;
if (unlikely(force))
return cfq_forced_dispatch(cfqd);
cfqq = cfq_select_queue(cfqd);
if (!cfqq)
return 0;
/*
* Dispatch a request from this cfqq
* Dispatch a request from this cfqq, if it is allowed
*/
cfq_dispatch_request(cfqd, cfqq);
if (!cfq_dispatch_request(cfqd, cfqq))
return 0;
cfqq->slice_dispatch++;
cfq_clear_cfqq_must_dispatch(cfqq);
@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
if (unlikely(cfqd->active_queue == cfqq)) {
__cfq_slice_expired(cfqd, cfqq, 0);
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
}
kmem_cache_free(cfq_pool, cfqq);
@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
if (unlikely(cfqq == cfqd->active_queue)) {
__cfq_slice_expired(cfqd, cfqq, 0);
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
}
cfq_put_queue(cfqq);
@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
pid_t pid, int is_sync)
pid_t pid, bool is_sync)
{
RB_CLEAR_NODE(&cfqq->rb_node);
RB_CLEAR_NODE(&cfqq->p_node);
@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
struct io_context *ioc, gfp_t gfp_mask)
{
struct cfq_queue *cfqq, *new_cfqq = NULL;
@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
gfp_t gfp_mask)
{
const int ioprio = task_ioprio(ioc);
@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
(!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
unsigned int slice_idle = cfqd->cfq_slice_idle;
if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
if (cic->ttime_mean > slice_idle)
enable_idle = 0;
else
enable_idle = 1;
@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* Check if new_cfqq should preempt the currently active queue. Return false
* for no (or if we aren't sure); true will cause a preempt.
*/
static int
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
struct request *rq)
{
@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
cfqq = cfqd->active_queue;
if (!cfqq)
return 0;
return false;
if (cfq_slice_used(cfqq))
return 1;
return true;
if (cfq_class_idle(new_cfqq))
return 0;
return false;
if (cfq_class_idle(cfqq))
return 1;
return true;
/*
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return 1;
return true;
/*
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
if (rq_is_meta(rq) && !cfqq->meta_pending)
return 1;
return true;
/*
* Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
*/
if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
return 1;
return true;
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
return 0;
return false;
/*
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
if (cfq_rq_close(cfqd, rq))
return 1;
return true;
return 0;
return false;
}
/*
@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_add_rq_rb(rq);
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_rq_enqueued(cfqd, cfqq, rq);
@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}
if (!rq_in_driver(cfqd))
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
}
/*
@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
const int is_sync = rq_is_sync(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
unsigned long flags;
@ -2341,7 +2366,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
if (cic)
put_io_context(cic->ioc);
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
cfq_log(cfqd, "set_request fail");
return 1;
@ -2350,7 +2375,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
static void cfq_kick_queue(struct work_struct *work)
{
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work.work);
container_of(work, struct cfq_data, unplug_work);
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data)
expire:
cfq_slice_expired(cfqd, timed_out);
out_kick:
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
out_cont:
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
@ -2412,7 +2437,7 @@ static void cfq_idle_slice_timer(unsigned long data)
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
del_timer_sync(&cfqd->idle_slice_timer);
cancel_delayed_work_sync(&cfqd->unplug_work);
cancel_work_sync(&cfqd->unplug_work);
}
static void cfq_put_async_queues(struct cfq_data *cfqd)
@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
cfqd->idle_slice_timer.data = (unsigned long) cfqd;
INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];

View file

@ -1059,9 +1059,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
return count;
strlcpy(elevator_name, name, sizeof(elevator_name));
strstrip(elevator_name);
e = elevator_get(elevator_name);
e = elevator_get(strstrip(elevator_name));
if (!e) {
printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
return -EINVAL;
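This is a real bug fix, not a cleanup: strstrip() removes trailing whitespace in place but skips leading whitespace only through its return value, so calling it and then passing the original buffer leaves leading blanks in the name. A minimal illustration (buffer contents assumed for the example):

	char buf[] = "  cfq\n";
	char *name = strstrip(buf);	/* trailing "\n" cut in place; name points past the leading blanks, at "cfq" */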

View file

@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_alignment_offset.attr,
&dev_attr_capability.attr,
&dev_attr_stat.attr,
&dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, merges[1]),
(unsigned long long)part_stat_read(hd, sectors[1]),
jiffies_to_msecs(part_stat_read(hd, ticks[1])),
hd->in_flight,
part_in_flight(hd),
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue))
);
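These call sites now go through part_in_flight() instead of reading hd->in_flight directly. With in_flight split into a two-slot array per transfer direction elsewhere in this diff, the helper presumably just sums both slots; a minimal sketch of such a helper:

/* Assumed shape of the helper behind the part_in_flight() calls above. */
static inline int part_in_flight(struct hd_struct *part)
{
	return part->in_flight[0] + part->in_flight[1];
}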

View file

@ -68,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
MODULE_VERSION("3.6.20");
MODULE_LICENSE("GPL");
static int cciss_allow_hpsa;
module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cciss_allow_hpsa,
"Prevent cciss driver from accessing hardware known to be "
" supported by the hpsa driver");
#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>
@ -101,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
};
@ -123,8 +127,6 @@ static struct board_type products[] = {
{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
{0x40910E11, "Smart Array 6i", &SA5_access},
{0x3225103C, "Smart Array P600", &SA5_access},
{0x3223103C, "Smart Array P800", &SA5_access},
{0x3234103C, "Smart Array P400", &SA5_access},
{0x3235103C, "Smart Array P400i", &SA5_access},
{0x3211103C, "Smart Array E200i", &SA5_access},
{0x3212103C, "Smart Array E200", &SA5_access},
@ -132,6 +134,10 @@ static struct board_type products[] = {
{0x3214103C, "Smart Array E200i", &SA5_access},
{0x3215103C, "Smart Array E200i", &SA5_access},
{0x3237103C, "Smart Array E500", &SA5_access},
/* controllers below this line are also supported by the hpsa driver. */
#define HPSA_BOUNDARY 0x3223103C
{0x3223103C, "Smart Array P800", &SA5_access},
{0x3234103C, "Smart Array P400", &SA5_access},
{0x323D103C, "Smart Array P700m", &SA5_access},
{0x3241103C, "Smart Array P212", &SA5_access},
{0x3243103C, "Smart Array P410", &SA5_access},
@ -140,7 +146,6 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324A103C, "Smart Array P712m", &SA5_access},
{0x324B103C, "Smart Array P711m", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
/* How long to wait (in milliseconds) for board to go into simple mode */
@ -3754,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
__u64 cfg_offset;
__u32 cfg_base_addr;
__u64 cfg_base_addr_index;
int i, err;
int i, prod_index, err;
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id);
for (i = 0; i < ARRAY_SIZE(products); i++) {
/* Stand aside for hpsa driver on request */
if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
return -ENODEV;
if (board_id == products[i].board_id)
break;
}
prod_index = i;
if (prod_index == ARRAY_SIZE(products)) {
dev_warn(&pdev->dev,
"unrecognized board ID: 0x%08lx, ignoring.\n",
(unsigned long) board_id);
return -ENODEV;
}
/* check to see if controller has been disabled */
/* BEFORE trying to enable it */
@ -3778,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
return err;
}
subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id);
#ifdef CCISS_DEBUG
printk("command = %x\n", command);
printk("irq = %x\n", pdev->irq);
@ -3868,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
* leave a little room for ioctl calls.
*/
c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
for (i = 0; i < ARRAY_SIZE(products); i++) {
if (board_id == products[i].board_id) {
c->product_name = products[i].product_name;
c->access = *(products[i].access);
c->nr_cmds = c->max_commands - 4;
break;
}
}
c->product_name = products[prod_index].product_name;
c->access = *(products[prod_index].access);
c->nr_cmds = c->max_commands - 4;
if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
(readb(&c->cfgtable->Signature[1]) != 'I') ||
(readb(&c->cfgtable->Signature[2]) != 'S') ||
@ -3884,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
err = -ENODEV;
goto err_out_free_res;
}
/* We didn't find the controller in our list. We know the
* signature is valid. If it's an HP device let's try to
* bind to the device and fire it up. Otherwise we bail.
*/
if (i == ARRAY_SIZE(products)) {
if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
c->product_name = products[i-1].product_name;
c->access = *(products[i-1].access);
c->nr_cmds = c->max_commands - 4;
printk(KERN_WARNING "cciss: This is an unknown "
"Smart Array controller.\n"
"cciss: Please update to the latest driver "
"available from www.hp.com.\n");
} else {
printk(KERN_WARNING "cciss: Sorry, I don't know how"
" to access the Smart Array controller %08lx\n"
, (unsigned long)board_id);
err = -ENODEV;
goto err_out_free_res;
}
}
#ifdef CONFIG_X86
{
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
@ -4254,7 +4248,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
mutex_init(&hba[i]->busy_shutting_down);
if (cciss_pci_init(hba[i], pdev) != 0)
goto clean0;
goto clean_no_release_regions;
sprintf(hba[i]->devname, "cciss%d", i);
hba[i]->ctlr = i;
@ -4391,13 +4385,14 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
clean1:
cciss_destroy_hba_sysfs_entry(hba[i]);
clean0:
pci_release_regions(pdev);
clean_no_release_regions:
hba[i]->busy_initializing = 0;
/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
free_hba(i);
return -1;
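Note the stand-aside mechanism depends on table order: every hpsa-capable board sits at or after the HPSA_BOUNDARY entry in products[], so with cciss_allow_hpsa set the lookup loop hits the boundary check before any of those IDs can match and returns -ENODEV.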

View file

@ -43,6 +43,7 @@
#define RTC_VERSION "1.07"
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>

View file

@ -74,6 +74,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>

View file

@ -36,6 +36,7 @@
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/pci.h>
#include <linux/init.h>

View file

@ -402,28 +402,26 @@ static void flush_to_ldisc(struct work_struct *work)
container_of(work, struct tty_struct, buf.work.work);
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf, *head;
char *char_buf;
unsigned char *flag_buf;
disc = tty_ldisc_ref(tty);
if (disc == NULL) /* !TTY_LDISC */
return;
spin_lock_irqsave(&tty->buf.lock, flags);
/* So we know a flush is running */
set_bit(TTY_FLUSHING, &tty->flags);
head = tty->buf.head;
if (head != NULL) {
tty->buf.head = NULL;
for (;;) {
int count = head->commit - head->read;
if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
struct tty_buffer *head;
while ((head = tty->buf.head) != NULL) {
int count;
char *char_buf;
unsigned char *flag_buf;
count = head->commit - head->read;
if (!count) {
if (head->next == NULL)
break;
tbuf = head;
head = head->next;
tty_buffer_free(tty, tbuf);
tty->buf.head = head->next;
tty_buffer_free(tty, head);
continue;
}
/* Ldisc or user is trying to flush the buffers
@ -445,9 +443,9 @@ static void flush_to_ldisc(struct work_struct *work)
flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
}
/* Restore the queue head */
tty->buf.head = head;
clear_bit(TTY_FLUSHING, &tty->flags);
}
/* We may have a deferred request to flush the input buffer,
if so pull the chain under the lock and empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
@ -455,7 +453,6 @@ static void flush_to_ldisc(struct work_struct *work)
clear_bit(TTY_FLUSHPENDING, &tty->flags);
wake_up(&tty->read_wait);
}
clear_bit(TTY_FLUSHING, &tty->flags);
spin_unlock_irqrestore(&tty->buf.lock, flags);
tty_ldisc_deref(disc);
@ -471,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
flush_to_ldisc(&tty->buf.work.work);
flush_delayed_work(&tty->buf.work);
}
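flush_delayed_work() kicks off a still-pending delayed work item and waits for it to finish on its workqueue, so the ldisc flush now runs exactly once and in workqueue context instead of being re-entered directly from the caller; this is the same race the new test_and_set_bit(TTY_FLUSHING, ...) guard above defends against.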
/**

View file

@ -188,14 +188,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
/* Impossible login_id, to detect logout attempt before successful login */
#define INVALID_LOGIN_ID 0x10000
/*
* Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
* provided in the config rom. Most devices do provide a value, which
* we'll use for login management orbs, but with some sane limits.
*/
#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */
#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */
#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
#define SBP2_ORB_NULL 0x80000000
#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
@ -1034,7 +1027,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
{
struct fw_csr_iterator ci;
int key, value;
unsigned int timeout;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
@ -1059,17 +1051,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
case SBP2_CSR_UNIT_CHARACTERISTICS:
/* the timeout value is stored in 500ms units */
timeout = ((unsigned int) value >> 8 & 0xff) * 500;
timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
tgt->mgt_orb_timeout =
min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);
if (timeout > tgt->mgt_orb_timeout)
fw_notify("%s: config rom contains %ds "
"management ORB timeout, limiting "
"to %ds\n", tgt->bus_id,
timeout / 1000,
tgt->mgt_orb_timeout / 1000);
tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
break;
case SBP2_CSR_LOGICAL_UNIT_NUMBER:
@ -1087,6 +1069,22 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
return 0;
}
/*
* Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
* provided in the config rom. Most devices do provide a value, which
* we'll use for login management orbs, but with some sane limits.
*/
static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
{
unsigned int timeout = tgt->mgt_orb_timeout;
if (timeout > 40000)
fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
tgt->bus_id, timeout / 1000);
tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
}
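clamp_val() bounds the value using the variable's own type; the call above is equivalent to the following sketch:

/* Equivalent of clamp_val(timeout, 5000, 40000) for an unsigned int
 * (sketch; the real macro works for any arithmetic type): */
static unsigned int clamp_mgt_orb_timeout(unsigned int v)
{
	return v < 5000 ? 5000 : (v > 40000 ? 40000 : v);
}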
static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
u32 firmware_revision)
{
@ -1171,6 +1169,7 @@ static int sbp2_probe(struct device *dev)
&firmware_revision) < 0)
goto fail_tgt_put;
sbp2_clamp_management_orb_timeout(tgt);
sbp2_init_workarounds(tgt, model, firmware_revision);
/*

View file

@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
* @type: HID report type (HID_*_REPORT)
* @data: report contents
* @size: size of data parameter
* @interrupt: called from atomic?
* @interrupt: distinguish between interrupt and control transfers
*
* This is data entry for lower layers.
*/

View file

@ -132,12 +132,12 @@ static struct hid_driver twinhan_driver = {
.input_mapping = twinhan_input_mapping,
};
static int twinhan_init(void)
static int __init twinhan_init(void)
{
return hid_register_driver(&twinhan_driver);
}
static void twinhan_exit(void)
static void __exit twinhan_exit(void)
{
hid_unregister_driver(&twinhan_driver);
}

View file

@ -48,10 +48,9 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
char *report;
DECLARE_WAITQUEUE(wait, current);
mutex_lock(&list->read_mutex);
while (ret == 0) {
mutex_lock(&list->read_mutex);
if (list->head == list->tail) {
add_wait_queue(&list->hidraw->wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);

View file

@ -130,7 +130,7 @@ struct mapped_device {
/*
* A list of ios that arrived while we were suspended.
*/
atomic_t pending;
atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
struct bio_list deferred;
@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
int cpu;
int rw = bio_data_dir(io->bio);
io->start_time = jiffies;
cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_unlock();
dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}
static void end_io_acct(struct dm_io *io)
@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
* After this is decremented the bio must not be touched if it is
* a barrier.
*/
dm_disk(md)->part0.in_flight = pending =
atomic_dec_return(&md->pending);
dm_disk(md)->part0.in_flight[rw] = pending =
atomic_dec_return(&md->pending[rw]);
pending += atomic_read(&md->pending[rw^0x1]);
/* nudge anyone waiting on suspend queue */
if (!pending)
@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->disk)
goto bad_disk;
atomic_set(&md->pending, 0);
atomic_set(&md->pending[0], 0);
atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
break;
}
spin_unlock_irqrestore(q->queue_lock, flags);
} else if (!atomic_read(&md->pending))
} else if (!atomic_read(&md->pending[0]) &&
!atomic_read(&md->pending[1]))
break;
if (interruptible == TASK_INTERRUPTIBLE &&

View file

@ -480,7 +480,6 @@ static int
add_children(struct twl4030_platform_data *pdata, unsigned long features)
{
struct device *child;
struct device *usb_transceiver = NULL;
if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) {
child = add_child(3, "twl4030_bci",
@ -532,16 +531,61 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
}
if (twl_has_usb() && pdata->usb) {
static struct regulator_consumer_supply usb1v5 = {
.supply = "usb1v5",
};
static struct regulator_consumer_supply usb1v8 = {
.supply = "usb1v8",
};
static struct regulator_consumer_supply usb3v1 = {
.supply = "usb3v1",
};
/* First add the regulators so that they can be used by the transceiver */
if (twl_has_regulator()) {
/* this is a template that gets copied */
struct regulator_init_data usb_fixed = {
.constraints.valid_modes_mask =
REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.constraints.valid_ops_mask =
REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
};
child = add_regulator_linked(TWL4030_REG_VUSB1V5,
&usb_fixed, &usb1v5, 1);
if (IS_ERR(child))
return PTR_ERR(child);
child = add_regulator_linked(TWL4030_REG_VUSB1V8,
&usb_fixed, &usb1v8, 1);
if (IS_ERR(child))
return PTR_ERR(child);
child = add_regulator_linked(TWL4030_REG_VUSB3V1,
&usb_fixed, &usb3v1, 1);
if (IS_ERR(child))
return PTR_ERR(child);
}
child = add_child(0, "twl4030_usb",
pdata->usb, sizeof(*pdata->usb),
true,
/* irq0 = USB_PRES, irq1 = USB */
pdata->irq_base + 8 + 2, pdata->irq_base + 4);
if (IS_ERR(child))
return PTR_ERR(child);
/* we need to connect regulators to this transceiver */
usb_transceiver = child;
if (twl_has_regulator() && child) {
usb1v5.dev = child;
usb1v8.dev = child;
usb3v1.dev = child;
}
}
if (twl_has_watchdog()) {
@ -580,47 +624,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}
if (twl_has_regulator() && usb_transceiver) {
static struct regulator_consumer_supply usb1v5 = {
.supply = "usb1v5",
};
static struct regulator_consumer_supply usb1v8 = {
.supply = "usb1v8",
};
static struct regulator_consumer_supply usb3v1 = {
.supply = "usb3v1",
};
/* this is a template that gets copied */
struct regulator_init_data usb_fixed = {
.constraints.valid_modes_mask =
REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.constraints.valid_ops_mask =
REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
};
usb1v5.dev = usb_transceiver;
usb1v8.dev = usb_transceiver;
usb3v1.dev = usb_transceiver;
child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed,
&usb1v5, 1);
if (IS_ERR(child))
return PTR_ERR(child);
child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed,
&usb1v8, 1);
if (IS_ERR(child))
return PTR_ERR(child);
child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed,
&usb3v1, 1);
if (IS_ERR(child))
return PTR_ERR(child);
}
/* maybe add LDOs that are omitted on cost-reduced parts */
if (twl_has_regulator() && !(features & TPS_SUBSET)) {
child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);

View file

@ -693,7 +693,7 @@ static int pxamci_probe(struct platform_device *pdev)
if (gpio_is_valid(gpio_ro)) {
ret = gpio_request(gpio_ro, "mmc card read only");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_power);
dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
goto err_gpio_ro;
}
gpio_direction_input(gpio_ro);
@ -701,7 +701,7 @@ static int pxamci_probe(struct platform_device *pdev)
if (gpio_is_valid(gpio_cd)) {
ret = gpio_request(gpio_cd, "mmc card detect");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_power);
dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
goto err_gpio_cd;
}
gpio_direction_input(gpio_cd);

View file

@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>

View file

@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>

View file

@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>

View file

@ -35,12 +35,23 @@ static size_t buffer_pos;
/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);
/* Add an entry to the event buffer. When we
* get near to the end we wake up the process
* sleeping on the read() of the file.
/*
* Add an entry to the event buffer. When we get near to the end we
* wake up the process sleeping on the read() of the file. To protect
* the event_buffer this function may only be called when buffer_mutex
* is held.
*/
void add_event_entry(unsigned long value)
{
/*
* This shouldn't happen since all workqueues or handlers are
* canceled or flushed before the event buffer is freed.
*/
if (!event_buffer) {
WARN_ON_ONCE(1);
return;
}
if (buffer_pos == buffer_size) {
atomic_inc(&oprofile_stats.event_lost_overflow);
return;
@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void)
int alloc_event_buffer(void)
{
int err = -ENOMEM;
unsigned long flags;
spin_lock_irqsave(&oprofilefs_lock, flags);
@ -80,21 +90,22 @@ int alloc_event_buffer(void)
if (buffer_watershed >= buffer_size)
return -EINVAL;
buffer_pos = 0;
event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
if (!event_buffer)
goto out;
return -ENOMEM;
err = 0;
out:
return err;
return 0;
}
void free_event_buffer(void)
{
mutex_lock(&buffer_mutex);
vfree(event_buffer);
buffer_pos = 0;
event_buffer = NULL;
mutex_unlock(&buffer_mutex);
}
@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
mutex_lock(&buffer_mutex);
/* May happen if the buffer is freed during pending reads. */
if (!event_buffer) {
retval = -EINTR;
goto out;
}
atomic_set(&buffer_ready, 0);
retval = -EFAULT;

View file

@ -354,6 +354,7 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_rhsa *rhsa;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
@ -375,6 +376,12 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
atsr = container_of(header, struct acpi_dmar_atsr, header);
printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
break;
case ACPI_DMAR_HARDWARE_AFFINITY:
rhsa = container_of(header, struct acpi_dmar_rhsa, header);
printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
(unsigned long long)rhsa->base_address,
rhsa->proximity_domain);
break;
}
}
@ -459,9 +466,13 @@ parse_dmar_table(void)
ret = dmar_parse_one_atsr(entry_header);
#endif
break;
case ACPI_DMAR_HARDWARE_AFFINITY:
/* We don't do anything with RHSA (yet?) */
break;
default:
printk(KERN_WARNING PREFIX
"Unknown DMAR structure type\n");
"Unknown DMAR structure type %d\n",
entry_header->type);
ret = 0; /* for forward compatibility */
break;
}

View file

@ -32,6 +32,7 @@
#include <asm/io.h> /* for read? and write? functions */
#include <linux/delay.h> /* for delays */
#include <linux/mutex.h>
#include <linux/sched.h> /* for signal_pending() */
#define MY_NAME "cpqphp"

View file

@ -48,6 +48,7 @@
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
#define IOAPIC_RANGE_START (0xfee00000)
#define IOAPIC_RANGE_END (0xfeefffff)
@ -94,6 +95,7 @@ static inline unsigned long virt_to_dma_pfn(void *p)
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
@ -1934,6 +1936,9 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
}
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
@ -2151,8 +2156,14 @@ static int domain_add_dev_info(struct dmar_domain *domain,
static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
{
if (iommu_identity_mapping == 2)
return IS_GFX_DEVICE(pdev);
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return 1;
if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
return 1;
if (!(iommu_identity_mapping & IDENTMAP_ALL))
return 0;
/*
* We want to start off with all devices in the 1:1 domain, and
@ -2332,11 +2343,14 @@ int __init init_dmars(void)
}
if (iommu_pass_through)
iommu_identity_mapping = 1;
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_DMAR_BROKEN_GFX_WA
else
iommu_identity_mapping = 2;
iommu_identity_mapping |= IDENTMAP_GFX;
#endif
check_tylersburg_isoch();
/*
* If pass through is not set or not enabled, setup context entries for
* identity mappings for rmrr, gfx, and isa and may fall back to static
@ -3670,3 +3684,61 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do
this in a function called from init_dmars(), instead of in a PCI
quirk, because we don't want to print the obnoxious "BIOS broken"
message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
struct pci_dev *pdev;
uint32_t vtisochctrl;
/* If there's no Azalia in the system anyway, forget it. */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
if (!pdev)
return;
pci_dev_put(pdev);
/* System Management Registers. Might be hidden, in which case
we can't do the sanity check. But that's OK, because the
known-broken BIOSes _don't_ actually hide it, so far. */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
if (!pdev)
return;
if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
pci_dev_put(pdev);
return;
}
pci_dev_put(pdev);
/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
if (vtisochctrl & 1)
return;
/* Drop all bits other than the number of TLB entries */
vtisochctrl &= 0x1c;
/* If we have the recommended number of TLB entries (16), fine. */
if (vtisochctrl == 0x10)
return;
/* Zero TLB entries? You get to ride the short bus to school. */
if (!vtisochctrl) {
WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
iommu_identity_mapping |= IDENTMAP_AZALIA;
return;
}
printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
vtisochctrl);
}

View file

@ -513,7 +513,11 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
dev->current_state = state;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
if (dev->current_state != state && printk_ratelimit())
dev_info(&dev->dev, "Refused to change power state, "
"currently in D%d\n", dev->current_state);
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
@ -2542,10 +2546,10 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
/**
* pci_set_vga_state - set VGA decode state on device and parents if requested
* @dev the PCI device
* @decode - true = enable decoding, false = disable decoding
* @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
* @change_bridge - traverse ancestors and change bridges
* @dev: the PCI device
* @decode: true = enable decoding, false = disable decoding
* @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
* @change_bridge: traverse ancestors and change bridges
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
unsigned int command_bits, bool change_bridge)
@ -2719,17 +2723,6 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
static int __devinit pci_init(void)
{
struct pci_dev *dev = NULL;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
pci_fixup_device(pci_fixup_final, dev);
}
return 0;
}
static int __init pci_setup(char *str)
{
while (str) {
@ -2767,8 +2760,6 @@ static int __init pci_setup(char *str)
}
early_param("pci", pci_setup);
device_initcall(pci_init);
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);

View file

@ -53,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = {
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
.port_type = PCIE_ANY_PORT,
.port_type = PCIE_RC_PORT,
.service = PCIE_PORT_SERVICE_AER,
.probe = aer_probe,

View file

@ -30,7 +30,6 @@ MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* global data */
static const char device_name[] = "pcieport-driver";
static int pcie_portdrv_restore_config(struct pci_dev *dev)
{
@ -262,7 +261,7 @@ static struct pci_error_handlers pcie_portdrv_err_handler = {
};
static struct pci_driver pcie_portdriver = {
.name = (char *)device_name,
.name = "pcieport",
.id_table = &port_pci_ids[0],
.probe = pcie_portdrv_probe,

View file

@ -670,6 +670,25 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
/*
* TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
* Disable fast back-to-back on the secondary bus segment
*/
static void __devinit quirk_xio2000a(struct pci_dev *dev)
{
struct pci_dev *pdev;
u16 command;
dev_warn(&dev->dev, "TI XIO2000a quirk detected; "
"secondary bus fast back-to-back transfers disabled\n");
list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
pci_read_config_word(pdev, PCI_COMMAND, &command);
if (command & PCI_COMMAND_FAST_BACK)
pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
quirk_xio2000a);
#ifdef CONFIG_X86_IO_APIC
@ -2572,6 +2591,19 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
}
pci_do_fixups(dev, start, end);
}
static int __init pci_apply_final_quirks(void)
{
struct pci_dev *dev = NULL;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
pci_fixup_device(pci_fixup_final, dev);
}
return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
#else
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
#endif

View file

@ -299,8 +299,17 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon
r = bus->resource[i];
if (r == &ioport_resource || r == &iomem_resource)
continue;
if (r && (r->flags & type_mask) == type && !r->parent)
return r;
if (r && (r->flags & type_mask) == type) {
if (!r->parent)
return r;
/*
* If there is no child under it, we should release the
* resource and reuse it; no need to reset it, pbus_size_*
* will set it up again.
*/
if (!r->child && !release_resource(r))
return r;
}
}
return NULL;
}

View file

@ -205,43 +205,6 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret;
}
#if 0
int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
{
struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
unsigned int type_mask;
int i, ret = -EBUSY;
type_mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH;
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
if (!r)
continue;
/* type_mask must match */
if ((res->flags ^ r->flags) & type_mask)
continue;
ret = request_resource(r, res);
if (ret == 0)
break;
}
if (ret) {
dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
} else if (resno < PCI_BRIDGE_RESOURCES) {
pci_update_resource(dev, resno);
}
return ret;
}
EXPORT_SYMBOL_GPL(pci_assign_resource_fixed);
#endif
/* Sort resources by alignment */
void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
{

View file

@ -2533,6 +2533,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
unsigned long *idaw;
cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
@ -2546,9 +2547,17 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
ccw->cda = (__u32)(addr_t)rdc_buffer;
ccw->count = rdc_buffer_size;
if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
idaw = (unsigned long *) (cqr->data);
ccw->cda = (__u32)(addr_t) idaw;
ccw->flags = CCW_FLAG_IDA;
idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
} else {
ccw->cda = (__u32)(addr_t) rdc_buffer;
ccw->flags = 0;
}
ccw->count = rdc_buffer_size;
cqr->startdev = device;
cqr->memdev = device;
cqr->expires = 10*HZ;
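Background for the IDAL branch: an s390 CCW carries only a 31-bit data address, so a buffer that sits above 2 GiB cannot be addressed directly. idal_is_needed() presumably checks for exactly that case, and idal_create_words() then builds the list of indirect-data-address words (stored in cqr->data) that the channel follows instead.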

View file

@ -3216,6 +3216,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
struct dasd_eckd_characteristics temp_rdc_data;
int is_known, rc;
struct dasd_uid temp_uid;
unsigned long flags;
private = (struct dasd_eckd_private *) device->private;
@ -3228,7 +3229,8 @@ int dasd_eckd_restore_device(struct dasd_device *device)
rc = dasd_eckd_generate_uid(device, &private->uid);
dasd_get_uid(device->cdev, &temp_uid);
if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
dev_err(&device->cdev->dev, "The UID of the DASD has changed\n");
dev_err(&device->cdev->dev, "The UID of the DASD has "
"changed\n");
if (rc)
goto out_err;
dasd_set_uid(device->cdev, &private->uid);
@ -3256,9 +3258,9 @@ int dasd_eckd_restore_device(struct dasd_device *device)
"device: %s", rc, dev_name(&device->cdev->dev));
goto out_err;
}
spin_lock(get_ccwdev_lock(device->cdev));
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
spin_unlock(get_ccwdev_lock(device->cdev));
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* add device to alias management */
dasd_alias_add_device(device);

View file

@ -62,7 +62,7 @@ static struct notifier_block call_home_panic_nb = {
.priority = INT_MAX,
};
static int proc_handler_callhome(ctl_table *ctl, int write, struct file *filp,
static int proc_handler_callhome(struct ctl_table *ctl, int write,
void __user *buffer, size_t *count,
loff_t *ppos)
{
@ -100,7 +100,7 @@ static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
.mode = 0644,
.proc_handler = &proc_handler_callhome,
.proc_handler = proc_handler_callhome,
},
{ .ctl_name = 0 }
};

View file

@ -705,21 +705,6 @@ static int __init sclp_vt220_tty_init(void)
}
__initcall(sclp_vt220_tty_init);
#ifdef CONFIG_SCLP_VT220_CONSOLE
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
*index = 0;
return sclp_vt220_driver;
}
static void __sclp_vt220_flush_buffer(void)
{
unsigned long flags;
@ -776,6 +761,21 @@ static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
}
}
#ifdef CONFIG_SCLP_VT220_CONSOLE
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
*index = 0;
return sclp_vt220_driver;
}
static int
sclp_vt220_notify(struct notifier_block *self,
unsigned long event, void *data)

View file

@ -162,9 +162,10 @@ tapeblock_requeue(struct work_struct *work) {
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
!blk_queue_plugged(queue) &&
(req = blk_fetch_request(queue)) &&
blk_peek_request(queue) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
req = blk_fetch_request(queue);
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
spin_unlock_irq(&device->blk_data.request_queue_lock);

View file

@ -1250,8 +1250,7 @@ static int io_subchannel_probe(struct subchannel *sch)
unsigned long flags;
struct ccw_dev_id dev_id;
cdev = sch_get_cdev(sch);
if (cdev) {
if (cio_is_console(sch->schid)) {
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group);
if (rc)
@ -1260,13 +1259,13 @@ static int io_subchannel_probe(struct subchannel *sch)
"0.%x.%04x (rc=%d)\n",
sch->schid.ssid, sch->schid.sch_no, rc);
/*
* This subchannel already has an associated ccw_device.
* The console subchannel already has an associated ccw_device.
* Throw the delayed uevent for the subchannel, register
* the ccw_device and exit. This happens for all early
* devices, e.g. the console.
* the ccw_device and exit.
*/
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
cdev = sch_get_cdev(sch);
cdev->dev.groups = ccwdev_attr_groups;
device_initialize(&cdev->dev);
ccw_device_register(cdev);

View file

@ -1270,6 +1270,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
BUG_ON(!kernel_locked());
if (!state)
return;
uport = state->uart_port;
port = &state->port;
@ -1316,9 +1319,9 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
*/
if (port->flags & ASYNC_INITIALIZED) {
unsigned long flags;
spin_lock_irqsave(&port->lock, flags);
spin_lock_irqsave(&uport->lock, flags);
uport->ops->stop_rx(uport);
spin_unlock_irqrestore(&port->lock, flags);
spin_unlock_irqrestore(&uport->lock, flags);
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially

View file

@ -1826,7 +1826,7 @@ static struct amba_id pl022_ids[] = {
* ST Micro derivative, this has 32bit wide
* and 32 locations deep TX/RX FIFO
*/
.id = 0x00108022,
.id = 0x01080022,
.mask = 0xffffffff,
.data = &vendor_st,
},

View file

@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
quirk_usb_handoff_xhci(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);

View file

@ -9,7 +9,7 @@ comment "Enable Host or Gadget support to see Inventra options"
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
depends on (USB || USB_GADGET)
depends on !SUPERH
depends on (ARM || BLACKFIN)
select NOP_USB_XCEIV if ARCH_DAVINCI
select TWL4030_USB if MACH_OMAP_3430SDP
select NOP_USB_XCEIV if MACH_OMAP3EVM

View file

@ -206,7 +206,7 @@ static int __devinit riowd_probe(struct of_device *op,
dev_set_drvdata(&op->dev, p);
riowd_device = p;
err = 0;
return 0;
out_iounmap:
of_iounmap(&op->resource[0], p->regs, 2);

View file

@ -2321,7 +2321,18 @@ static int ext3_commit_super(struct super_block *sb,
if (!sbh)
return error;
es->s_wtime = cpu_to_le32(get_seconds());
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
* write time when we are mounting the root file system
* read/only but we need to replay the journal; at that point,
* for people who are east of GMT and who make their clock
* tick in localtime for Windows bug-for-bug compatibility,
* the clock is set in the future, and this will cause e2fsck
* to complain and force a full file system check.
*/
if (!(sb->s_flags & MS_RDONLY))
es->s_wtime = cpu_to_le32(get_seconds());
es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
BUFFER_TRACE(sbh, "marking dirty");

View file

@ -1848,8 +1848,8 @@ nfs_compare_remount_data(struct nfs_server *nfss,
data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
data->nfs_server.port != nfss->port ||
data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
!rpc_cmp_addr(&data->nfs_server.address,
&nfss->nfs_client->cl_addr))
!rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
(struct sockaddr *)&nfss->nfs_client->cl_addr))
return -EINVAL;
return 0;

View file

@ -248,11 +248,19 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, merges[WRITE]),
(unsigned long long)part_stat_read(p, sectors[WRITE]),
jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
p->in_flight,
part_in_flight(p),
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)));
}
ssize_t part_inflight_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hd_struct *p = dev_to_part(dev);
return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]);
}
#ifdef CONFIG_FAIL_MAKE_REQUEST
ssize_t part_fail_show(struct device *dev,
struct device_attribute *attr, char *buf)
@ -281,6 +289,7 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@ -292,6 +301,7 @@ static struct attribute *part_attrs[] = {
&dev_attr_size.attr,
&dev_attr_alignment_offset.attr,
&dev_attr_stat.attr,
&dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
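Usage note: each disk and partition now exposes an inflight attribute; reading it (e.g. /sys/block/sda/inflight, device name assumed for illustration) prints the in-flight read and write counts as two right-aligned fields, matching the "%8u %8u" format above.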

View file

@ -1172,11 +1172,7 @@ static inline void put_dev_sector(Sector p)
}
struct work_struct;
struct delayed_work;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *work,
unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))

Some files were not shown because too many files have changed in this diff.