Merge commit 'v2.6.30-rc3' into x86/urgent

Merge reason: hpet.c changed upstream, make sure we test against that

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar 2009-04-22 12:44:16 +02:00
commit 3568b71d46
168 changed files with 1602 additions and 1214 deletions


@ -277,8 +277,7 @@ or bottom half).
unfreeze_fs: called when VFS is unlocking a filesystem and making it writable
again.
statfs: called when the VFS needs to get filesystem statistics. This
is called with the kernel lock held
statfs: called when the VFS needs to get filesystem statistics.
remount_fs: called when the filesystem is remounted. This is called
with the kernel lock held
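
The statfs entry above is prose-only; for concreteness, a minimal hypothetical super_operations statfs callback might look like the sketch below. Nothing in it comes from this tree: the name, magic number, and counts are assumptions for illustration.

/* Hypothetical statfs implementation (illustrative sketch only). */
static int examplefs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	buf->f_type    = 0x4d5a6266;	/* assumed filesystem magic */
	buf->f_bsize   = PAGE_SIZE;	/* block size reported to userspace */
	buf->f_blocks  = 1024;		/* assumed total data blocks */
	buf->f_bfree   = 512;		/* assumed free blocks */
	buf->f_bavail  = 512;
	buf->f_namelen = NAME_MAX;
	return 0;
}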


@ -511,10 +511,16 @@ SPI MASTER METHODS
This sets up the device clock rate, SPI mode, and word sizes.
Drivers may change the defaults provided by board_info, and then
call spi_setup(spi) to invoke this routine. It may sleep.
Unless each SPI slave has its own configuration registers, don't
change them right away ... otherwise drivers could corrupt I/O
that's in progress for other SPI devices.
** BUG ALERT: for some reason the first version of
** many spi_master drivers seems to get this wrong.
** When you code setup(), ASSUME that the controller
** is actively processing transfers for another device.
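
A hedged sketch of a setup() that honors this warning: it validates and caches per-chip state, and defers all controller-register writes to the transfer path. The example_* names and the 48 MHz base clock are assumptions, not from any real driver.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/* Hypothetical per-chip state cached by setup(). */
struct example_chip {
	u32 clk_div;
};

static int example_spi_setup(struct spi_device *spi)
{
	struct example_chip *chip = spi_get_ctldata(spi);
	u32 hz = spi->max_speed_hz ? spi->max_speed_hz : 1000000;

	if (spi->bits_per_word > 16)
		return -EINVAL;

	if (!chip) {
		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/* Only compute the divisor here; program the controller later,
	 * from its own transfer queue, when no other device's I/O is
	 * in flight. That is exactly the rule the BUG ALERT above
	 * describes. */
	chip->clk_div = DIV_ROUND_UP(48000000, hz);
	return 0;
}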
master->transfer(struct spi_device *spi, struct spi_message *message)
This must not sleep. Its responsibility is to arrange that the
transfer happens and its complete() callback is issued. The two


@ -1287,6 +1287,14 @@ S: Maintained
F: Documentation/video4linux/bttv/
F: drivers/media/video/bt8xx/bttv*
CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
P: David Howells
M: dhowells@redhat.com
L: linux-cachefs@redhat.com
S: Supported
F: Documentation/filesystems/caching/cachefiles.txt
F: fs/cachefiles/
CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
P: Jonathan Corbet
M: corbet@lwn.net
@ -2057,6 +2065,8 @@ F: drivers/infiniband/hw/ehca/
EMBEDDED LINUX
P: Paul Gortmaker
M: paul.gortmaker@windriver.com
P: Matt Mackall
M: mpm@selenic.com
P: David Woodhouse
M: dwmw2@infradead.org
L: linux-embedded@vger.kernel.org
@ -2325,6 +2335,15 @@ F: Documentation/power/freezing-of-tasks.txt
F: include/linux/freezer.h
F: kernel/freezer.c
FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
P: David Howells
M: dhowells@redhat.com
L: linux-cachefs@redhat.com
S: Supported
F: Documentation/filesystems/caching/
F: fs/fscache/
F: include/linux/fscache*.h
FTRACE
P: Steven Rostedt
M: rostedt@goodmis.org
@ -2545,7 +2564,6 @@ F: kernel/power/
F: include/linux/suspend.h
F: include/linux/freezer.h
F: include/linux/pm.h
F: include/asm-*/suspend*.h
F: arch/*/include/asm/suspend*.h
HID CORE LAYER
@ -3323,7 +3341,7 @@ P: Eduard - Gabriel Munteanu
M: eduard.munteanu@linux360.ro
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/vm/kmemtrace.txt
F: Documentation/trace/kmemtrace.txt
F: include/trace/kmemtrace.h
F: kernel/trace/kmemtrace.c
@ -5387,7 +5405,6 @@ F: kernel/power/
F: include/linux/suspend.h
F: include/linux/freezer.h
F: include/linux/pm.h
F: include/asm-*/suspend.h
SVGA HANDLING
P: Martin Mares
@ -5621,7 +5638,7 @@ L: uclinux-dev@uclinux.org (subscribers-only)
S: Maintained
F: arch/m68knommu/
UCLINUX FOR RENESAS H8/300
UCLINUX FOR RENESAS H8/300 (H8300)
P: Yoshinori Sato
M: ysato@users.sourceforge.jp
W: http://uclinux-h8.sourceforge.jp/


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 30
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Temporary Tasmanian Devil
# *DOCUMENTATION*


@ -73,6 +73,6 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
#endif /* SMP */
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name)
#include <asm-generic/percpu.h>
#endif /* __ALPHA_PERCPU_H */


@ -177,21 +177,12 @@ asmlinkage long sys_oabi_fstatat64(int dfd,
int flag)
{
struct kstat stat;
int error = -EINVAL;
int error;
if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
goto out;
if (flag & AT_SYMLINK_NOFOLLOW)
error = vfs_lstat_fd(dfd, filename, &stat);
else
error = vfs_stat_fd(dfd, filename, &stat);
if (!error)
error = cp_oldabi_stat64(&stat, statbuf);
out:
return error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_oldabi_stat64(&stat, statbuf);
}
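
This conversion, repeated at several call sites below (s390, sparc, x86 ia32), folds the flag validation and the stat/lstat choice into the new vfs_fstatat() helper. Conceptually the helper performs roughly the logic that was deleted here; a simplified sketch, not the verbatim fs/stat.c source:

int vfs_fstatat(int dfd, char __user *filename, struct kstat *stat, int flag)
{
	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
		return -EINVAL;
	if (flag & AT_SYMLINK_NOFOLLOW)
		return vfs_lstat_fd(dfd, filename, stat);
	return vfs_stat_fd(dfd, filename, stat);
}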
struct oabi_flock64 {


@ -85,7 +85,7 @@ static struct irqaction at91rm9200_timer_irq = {
.handler = at91rm9200_timer_interrupt
};
static cycle_t read_clk32k(void)
static cycle_t read_clk32k(struct clocksource *cs)
{
return read_CRTR();
}
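
This signature change recurs throughout the rest of the merge: every clocksource read() hook now receives its struct clocksource. The practical benefit is that a driver can recover per-instance state via container_of() instead of relying on file-scope globals. A hypothetical sketch, where the example_* names and the register offset are assumptions:

#include <linux/clocksource.h>
#include <linux/io.h>
#include <linux/kernel.h>

/* Hypothetical driver state embedding its clocksource. */
struct example_timer {
	struct clocksource cs;
	void __iomem *base;
};

static cycle_t example_timer_read(struct clocksource *cs)
{
	struct example_timer *t = container_of(cs, struct example_timer, cs);

	return readl(t->base + 0x04);	/* assumed counter register */
}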


@ -31,7 +31,7 @@ static u32 pit_cnt; /* access only w/system irq blocked */
* Clocksource: just a monotonic counter of MCK/16 cycles.
* We don't care whether or not PIT irqs are enabled.
*/
static cycle_t read_pit_clk(void)
static cycle_t read_pit_clk(struct clocksource *cs)
{
unsigned long flags;
u32 elapsed;


@ -238,7 +238,7 @@ static void __init timer_init(void)
/*
* clocksource
*/
static cycle_t read_cycles(void)
static cycle_t read_cycles(struct clocksource *cs)
{
struct timer_s *t = &timers[TID_CLOCKSOURCE];


@ -73,7 +73,7 @@ static void __init imx_timer_hardware_init(void)
IMX_TCTL(TIMER_BASE) = TCTL_FRR | TCTL_CLK_PCLK1 | TCTL_TEN;
}
cycle_t imx_get_cycles(void)
cycle_t imx_get_cycles(struct clocksource *cs)
{
return IMX_TCN(TIMER_BASE);
}


@ -401,7 +401,7 @@ void __init ixp4xx_sys_init(void)
/*
* clocksource
*/
cycle_t ixp4xx_get_cycles(void)
cycle_t ixp4xx_get_cycles(struct clocksource *cs)
{
return *IXP4XX_OSTS;
}


@ -57,12 +57,12 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
static cycle_t msm_gpt_read(void)
static cycle_t msm_gpt_read(struct clocksource *cs)
{
return readl(MSM_GPT_BASE + TIMER_COUNT_VAL);
}
static cycle_t msm_dgt_read(void)
static cycle_t msm_dgt_read(struct clocksource *cs)
{
return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT;
}


@ -104,7 +104,7 @@ static struct irqaction netx_timer_irq = {
.handler = netx_timer_interrupt,
};
cycle_t netx_get_cycles(void)
cycle_t netx_get_cycles(struct clocksource *cs)
{
return readl(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE));
}


@ -25,7 +25,7 @@
#define TIMER_CLOCKEVENT 1
static u32 latch;
static cycle_t ns9360_clocksource_read(void)
static cycle_t ns9360_clocksource_read(struct clocksource *cs)
{
return __raw_readl(SYS_TR(TIMER_CLOCKSOURCE));
}


@ -198,7 +198,7 @@ static struct irqaction omap_mpu_timer2_irq = {
.handler = omap_mpu_timer2_interrupt,
};
static cycle_t mpu_read(void)
static cycle_t mpu_read(struct clocksource *cs)
{
return ~omap_mpu_timer_read(1);
}


@ -138,7 +138,7 @@ static inline void __init omap2_gp_clocksource_init(void) {}
* clocksource
*/
static struct omap_dm_timer *gpt_clocksource;
static cycle_t clocksource_read_cycles(void)
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
return (cycle_t)omap_dm_timer_read_counter(gpt_clocksource);
}


@ -125,7 +125,7 @@ static struct clock_event_device ckevt_pxa_osmr0 = {
.set_mode = pxa_osmr0_set_mode,
};
static cycle_t pxa_read_oscr(void)
static cycle_t pxa_read_oscr(struct clocksource *cs)
{
return OSCR;
}


@ -715,7 +715,7 @@ static struct irqaction realview_timer_irq = {
.handler = realview_timer_interrupt,
};
static cycle_t realview_get_cycles(void)
static cycle_t realview_get_cycles(struct clocksource *cs)
{
return ~readl(timer3_va_base + TIMER_VALUE);
}


@ -948,7 +948,7 @@ static struct irqaction versatile_timer_irq = {
.handler = versatile_timer_interrupt,
};
static cycle_t versatile_get_cycles(void)
static cycle_t versatile_get_cycles(struct clocksource *cs)
{
return ~readl(TIMER3_VA_BASE + TIMER_VALUE);
}


@ -36,7 +36,7 @@ static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED;
/* clock source */
static cycle_t mxc_get_cycles(void)
static cycle_t mxc_get_cycles(struct clocksource *cs)
{
return __raw_readl(TIMER_BASE + MXC_TCN);
}


@ -185,7 +185,7 @@ console_initcall(omap_add_serial_console);
#include <linux/clocksource.h>
static cycle_t omap_32k_read(void)
static cycle_t omap_32k_read(struct clocksource *cs)
{
return omap_readl(TIMER_32K_SYNCHRONIZED);
}
@ -207,7 +207,7 @@ unsigned long long sched_clock(void)
{
unsigned long long ret;
ret = (unsigned long long)omap_32k_read();
ret = (unsigned long long)omap_32k_read(&clocksource_32k);
ret = (ret * clocksource_32k.mult_orig) >> clocksource_32k.shift;
return ret;
}


@ -41,7 +41,7 @@ static u32 ticks_per_jiffy;
/*
* Clocksource handling.
*/
static cycle_t orion_clksrc_read(void)
static cycle_t orion_clksrc_read(struct clocksource *cs)
{
return 0xffffffff - readl(TIMER0_VAL);
}


@ -18,7 +18,7 @@
#include <mach/pm.h>
static cycle_t read_cycle_count(void)
static cycle_t read_cycle_count(struct clocksource *cs)
{
return (cycle_t)sysreg_read(COUNT);
}


@ -58,16 +58,11 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc)
return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
static cycle_t read_cycles(void)
static cycle_t read_cycles(struct clocksource *cs)
{
return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
}
unsigned long long sched_clock(void)
{
return cycles_2_ns(read_cycles());
}
static struct clocksource clocksource_bfin = {
.name = "bfin_cycles",
.rating = 350,
@ -77,6 +72,11 @@ static struct clocksource clocksource_bfin = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
unsigned long long sched_clock(void)
{
return cycles_2_ns(read_cycles(&clocksource_bfin));
}
static int __init bfin_clocksource_init(void)
{
set_cyc2ns_scale(get_cclk() / 1000);


@ -46,7 +46,6 @@
#include <asm/io.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#include <asm/pgtable.h>
#endif


@ -30,6 +30,29 @@ int __nongpreldata pcibios_last_bus = -1;
struct pci_bus *__nongpreldata pci_root_bus;
struct pci_ops *__nongpreldata pci_root_ops;
/*
* The accessible PCI window does not cover the entire CPU address space, but
* there are devices we want to access outside of that window, so we need to
* insert specific PCI bus resources instead of using the platform-level bus
* resources directly for the PCI root bus.
*
* These are configured and inserted by pcibios_init() and are attached to the
* root bus by pcibios_fixup_bus().
*/
static struct resource pci_ioport_resource = {
.name = "PCI IO",
.start = 0,
.end = IO_SPACE_LIMIT,
.flags = IORESOURCE_IO,
};
static struct resource pci_iomem_resource = {
.name = "PCI mem",
.start = 0,
.end = -1,
.flags = IORESOURCE_MEM,
};
/*
* Functions for accessing PCI configuration space
*/
@ -304,6 +327,12 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
#if 0
printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
#endif
if (bus->number == 0) {
bus->resource[0] = &pci_ioport_resource;
bus->resource[1] = &pci_iomem_resource;
}
pci_read_bridge_bases(bus);
if (bus->number == 0) {
@ -350,28 +379,36 @@ int __init pcibios_init(void)
/* enable PCI arbitration */
__reg_MB86943_pci_arbiter = MB86943_PCIARB_EN;
ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
ioport_resource.end += ioport_resource.start;
pci_ioport_resource.start = (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
pci_ioport_resource.end = (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
pci_ioport_resource.end += pci_ioport_resource.start;
printk("PCI IO window: %08llx-%08llx\n",
(unsigned long long) ioport_resource.start,
(unsigned long long) ioport_resource.end);
(unsigned long long) pci_ioport_resource.start,
(unsigned long long) pci_ioport_resource.end);
iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00;
pci_iomem_resource.start = (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00;
pci_iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff;
pci_iomem_resource.end += pci_iomem_resource.start;
/* Reserve somewhere to write to flush posted writes. */
iomem_resource.start += 0x400;
iomem_resource.end = (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff;
iomem_resource.end += iomem_resource.start;
/* Reserve somewhere to write to flush posted writes. This is used by
* __flush_PCI_writes() from asm/io.h to force the write FIFO in the
* CPU-PCI bridge to flush as this doesn't happen automatically when a
* read is performed on the MB93090 development kit motherboard.
*/
pci_iomem_resource.start += 0x400;
printk("PCI MEM window: %08llx-%08llx\n",
(unsigned long long) iomem_resource.start,
(unsigned long long) iomem_resource.end);
(unsigned long long) pci_iomem_resource.start,
(unsigned long long) pci_iomem_resource.end);
printk("PCI DMA memory: %08lx-%08lx\n",
dma_coherent_mem_start, dma_coherent_mem_end);
if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0)
panic("Unable to insert PCI IOMEM resource\n");
if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0)
panic("Unable to insert PCI IOPORT resource\n");
if (!pci_probe)
return -ENXIO;


@ -445,7 +445,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
register unsigned long ia64_intri_res asm ("r8"); \
register unsigned long __reg asm ("r8") = (reg); \
\
BUILD_BUG_ON(!__builtin_constant_p(reg)); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(GETREG) \
+ (reg)) \
@ -464,7 +463,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
register unsigned long ia64_clobber1 asm ("r8"); \
register unsigned long ia64_clobber2 asm ("r9"); \
\
BUILD_BUG_ON(!__builtin_constant_p(reg)); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(SETREG) \
+ (reg)) \


@ -58,7 +58,7 @@ extern struct smp_boot_data {
extern char no_int_routing __devinitdata;
extern cpumask_t cpu_core_map[NR_CPUS];
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
extern int smp_num_siblings;
extern void __iomem *ipi_base_addr;
extern unsigned char smp_int_redirect;


@ -21,7 +21,7 @@ void __init cyclone_setup(void)
static void __iomem *cyclone_mc;
static cycle_t read_cyclone(void)
static cycle_t read_cyclone(struct clocksource *cs)
{
return (cycle_t)readq((void __iomem *)cyclone_mc);
}


@ -225,6 +225,7 @@ smp_send_reschedule (int cpu)
{
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
/*
* Called with preemption disabled.
@ -300,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
return;
}
smp_call_function_mask(mm->cpu_vm_mask,
(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
local_irq_disable();
local_finish_flush_tlb_mm(mm);
local_irq_enable();
preempt_enable();
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
* have been running in the address space. It's not clear that this is worth the
* trouble though: to avoid races, we have to raise the IPI on the target CPU
* anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
* rather trivial.
*/
on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}
void arch_send_call_function_single_ipi(int cpu)


@ -33,7 +33,7 @@
#include "fsyscall_gtod_data.h"
static cycle_t itc_get_cycles(void);
static cycle_t itc_get_cycles(struct clocksource *cs);
struct fsyscall_gtod_data_t fsyscall_gtod_data = {
.lock = SEQLOCK_UNLOCKED,
@ -383,7 +383,7 @@ ia64_init_itm (void)
}
}
static cycle_t itc_get_cycles(void)
static cycle_t itc_get_cycles(struct clocksource *cs)
{
u64 lcycle, now, ret;


@ -23,7 +23,7 @@
extern unsigned long sn_rtc_cycles_per_second;
static cycle_t read_sn2(void)
static cycle_t read_sn2(struct clocksource *cs)
{
return (cycle_t)readq(RTC_COUNTER_ADDR);
}


@ -75,7 +75,7 @@ static struct irqaction m68328_timer_irq = {
/***************************************************************************/
static cycle_t m68328_read_clk(void)
static cycle_t m68328_read_clk(struct clocksource *cs)
{
unsigned long flags;
u32 cycles;


@ -34,7 +34,7 @@
#define DMA_DTMR_CLK_DIV_16 (2 << 1)
#define DMA_DTMR_ENABLE (1 << 0)
static cycle_t cf_dt_get_cycles(void)
static cycle_t cf_dt_get_cycles(struct clocksource *cs)
{
return __raw_readl(DTCN0);
}


@ -125,7 +125,7 @@ static struct irqaction pit_irq = {
/***************************************************************************/
static cycle_t pit_read_clk(void)
static cycle_t pit_read_clk(struct clocksource *cs)
{
unsigned long flags;
u32 cycles;


@ -78,7 +78,7 @@ static struct irqaction mcftmr_timer_irq = {
/***************************************************************************/
static cycle_t mcftmr_read_clk(void)
static cycle_t mcftmr_read_clk(struct clocksource *cs)
{
unsigned long flags;
u32 cycles;


@ -22,7 +22,7 @@
static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr;
static cycle_t txx9_cs_read(void)
static cycle_t txx9_cs_read(struct clocksource *cs)
{
return __raw_readl(&txx9_cs_tmrptr->trr);
}


@ -28,7 +28,7 @@
#include <asm/sibyte/sb1250.h>
static cycle_t bcm1480_hpt_read(void)
static cycle_t bcm1480_hpt_read(struct clocksource *cs)
{
return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
}


@ -25,7 +25,7 @@
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
static cycle_t dec_ioasic_hpt_read(void)
static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
{
return ioasic_read(IO_REG_FCTR);
}
@ -47,13 +47,13 @@ void __init dec_ioasic_clocksource_init(void)
while (!ds1287_timer_state())
;
start = dec_ioasic_hpt_read();
start = dec_ioasic_hpt_read(&clocksource_dec);
while (i--)
while (!ds1287_timer_state())
;
end = dec_ioasic_hpt_read();
end = dec_ioasic_hpt_read(&clocksource_dec);
freq = (end - start) * 10;
printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);


@ -10,7 +10,7 @@
#include <asm/time.h>
static cycle_t c0_hpt_read(void)
static cycle_t c0_hpt_read(struct clocksource *cs)
{
return read_c0_count();
}


@ -33,7 +33,7 @@
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again.
*/
static cycle_t sb1250_hpt_read(void)
static cycle_t sb1250_hpt_read(struct clocksource *cs)
{
unsigned int count;


@ -128,7 +128,7 @@ void __init setup_pit_timer(void)
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
static cycle_t pit_read(void)
static cycle_t pit_read(struct clocksource *cs)
{
unsigned long flags;
int count;


@ -35,7 +35,7 @@
static unsigned long cpj;
static cycle_t hpt_read(void)
static cycle_t hpt_read(struct clocksource *cs)
{
return read_c0_count2();
}


@ -159,7 +159,7 @@ static void __init hub_rt_clock_event_global_init(void)
setup_irq(irq, &hub_rt_irqaction);
}
static cycle_t hub_rt_read(void)
static cycle_t hub_rt_read(struct clocksource *cs)
{
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
}


@ -77,7 +77,7 @@
#include <linux/clockchips.h>
#include <linux/clocksource.h>
static cycle_t rtc_read(void);
static cycle_t rtc_read(struct clocksource *);
static struct clocksource clocksource_rtc = {
.name = "rtc",
.rating = 400,
@ -88,7 +88,7 @@ static struct clocksource clocksource_rtc = {
.read = rtc_read,
};
static cycle_t timebase_read(void);
static cycle_t timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
.name = "timebase",
.rating = 400,
@ -766,12 +766,12 @@ unsigned long read_persistent_clock(void)
}
/* clocksource code */
static cycle_t rtc_read(void)
static cycle_t rtc_read(struct clocksource *cs)
{
return (cycle_t)get_rtc();
}
static cycle_t timebase_read(void)
static cycle_t timebase_read(struct clocksource *cs)
{
return (cycle_t)get_tb();
}


@ -702,20 +702,12 @@ asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename,
struct stat64_emu31 __user* statbuf, int flag)
{
struct kstat stat;
int error = -EINVAL;
int error;
if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
goto out;
if (flag & AT_SYMLINK_NOFOLLOW)
error = vfs_lstat_fd(dfd, filename, &stat);
else
error = vfs_stat_fd(dfd, filename, &stat);
if (!error)
error = cp_stat64(statbuf, &stat);
out:
return error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_stat64(statbuf, &stat);
}
/*


@ -201,7 +201,7 @@ unsigned long read_persistent_clock(void)
return ts.tv_sec;
}
static cycle_t read_tod_clock(void)
static cycle_t read_tod_clock(struct clocksource *cs)
{
return get_clock();
}


@ -256,7 +256,6 @@ static int __init sh7722_devices_setup(void)
{
clk_always_enable("uram0"); /* URAM */
clk_always_enable("xymem0"); /* XYMEM */
clk_always_enable("rtc0"); /* RTC */
clk_always_enable("veu0"); /* VEU */
clk_always_enable("vpu0"); /* VPU */
clk_always_enable("jpu0"); /* JPU */


@ -267,7 +267,6 @@ static struct platform_device *sh7723_devices[] __initdata = {
static int __init sh7723_devices_setup(void)
{
clk_always_enable("meram0"); /* MERAM */
clk_always_enable("rtc0"); /* RTC */
clk_always_enable("veu1"); /* VEU2H1 */
clk_always_enable("veu0"); /* VEU2H0 */
clk_always_enable("vpu0"); /* VPU */


@ -63,6 +63,15 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/*
* The shift for mmap2 is constant, regardless of PAGE_SIZE
* setting.
*/
if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
return -EINVAL;
pgoff >>= PAGE_SHIFT - 12;
return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
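
A worked example of the check and shift above, assuming 64 kB pages (PAGE_SHIFT = 16):

/*
 * Illustration only: with PAGE_SHIFT = 16, PAGE_SHIFT - 12 = 4, so the
 * 4 kB-unit pgoff from userspace must be a multiple of 1 << 4 = 16.
 *   pgoff = 32 (offset 128 kB): 32 & 15 == 0, accepted; 32 >> 4 = 2 pages
 *   pgoff = 33: low bits set, sys_mmap2() returns -EINVAL
 */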


@ -208,7 +208,7 @@ unsigned long long sched_clock(void)
if (!clocksource_sh.rating)
return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
cycles = clocksource_sh.read();
cycles = clocksource_sh.read(&clocksource_sh);
return cyc2ns(&clocksource_sh, cycles);
}
#endif


@ -81,7 +81,7 @@ static int tmu_timer_stop(void)
*/
static int tmus_are_scaled;
static cycle_t tmu_timer_read(void)
static cycle_t tmu_timer_read(struct clocksource *cs)
{
return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
}


@ -206,21 +206,12 @@ asmlinkage long compat_sys_fstatat64(unsigned int dfd, char __user *filename,
struct compat_stat64 __user * statbuf, int flag)
{
struct kstat stat;
int error = -EINVAL;
int error;
if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
goto out;
if (flag & AT_SYMLINK_NOFOLLOW)
error = vfs_lstat_fd(dfd, filename, &stat);
else
error = vfs_stat_fd(dfd, filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
out:
return error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_compat_stat64(&stat, statbuf);
}
asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)


@ -814,6 +814,11 @@ void udelay(unsigned long usecs)
}
EXPORT_SYMBOL(udelay);
static cycle_t clocksource_tick_read(struct clocksource *cs)
{
return tick_ops->get_tick();
}
void __init time_init(void)
{
unsigned long freq = sparc64_init_timers();
@ -827,7 +832,7 @@ void __init time_init(void)
clocksource_tick.mult =
clocksource_hz2mult(freq,
clocksource_tick.shift);
clocksource_tick.read = tick_ops->get_tick;
clocksource_tick.read = clocksource_tick_read;
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);


@ -36,7 +36,7 @@ source "drivers/leds/Kconfig"
#This is just to shut up some Kconfig warnings, so no prompt.
config INPUT
bool
tristate
default n
source "arch/um/Kconfig.debug"


@ -65,7 +65,7 @@ static irqreturn_t um_timer(int irq, void *dev)
return IRQ_HANDLED;
}
static cycle_t itimer_read(void)
static cycle_t itimer_read(struct clocksource *cs)
{
return os_nsecs() / 1000;
}


@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
struct stat64 __user *statbuf, int flag)
{
struct kstat stat;
int error = -EINVAL;
int error;
if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
goto out;
if (flag & AT_SYMLINK_NOFOLLOW)
error = vfs_lstat_fd(dfd, filename, &stat);
else
error = vfs_stat_fd(dfd, filename, &stat);
if (!error)
error = cp_stat64(statbuf, &stat);
out:
return error;
error = vfs_fstatat(dfd, filename, &stat, flag);
if (error)
return error;
return cp_stat64(statbuf, &stat);
}
/*


@ -37,7 +37,7 @@ extern gate_desc idt_table[];
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
DECLARE_PER_CPU(struct gdt_page, gdt_page);
DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{


@ -26,7 +26,7 @@ typedef struct {
#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS


@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
extern __u32 cleared_cpu_caps[NCAPINTS];
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
#define current_cpu_data __get_cpu_var(cpu_info)
#else
@ -270,7 +270,7 @@ struct tss_struct {
} ____cacheline_aligned;
DECLARE_PER_CPU(struct tss_struct, init_tss);
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
/*
* Save the original ist values for checking stack pointers during debugging
@ -393,7 +393,7 @@ union irq_stack_union {
};
};
DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);


@ -152,7 +152,7 @@ struct tlb_state {
struct mm_struct *active_mm;
int state;
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
static inline void reset_lazy_tlbstate(void)
{


@ -722,7 +722,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
/*
* Clock source related code
*/
static cycle_t read_hpet(void)
static cycle_t read_hpet(struct clocksource *cs)
{
return (cycle_t)hpet_readl(HPET_COUNTER);
}
@ -756,7 +756,7 @@ static int hpet_clocksource_register(void)
hpet_restart_counter();
/* Verify whether hpet counter works */
t1 = read_hpet();
t1 = hpet_readl(HPET_COUNTER);
rdtscll(start);
/*
@ -770,7 +770,7 @@ static int hpet_clocksource_register(void)
rdtscll(now);
} while ((now - start) < 200000UL);
if (t1 == read_hpet()) {
if (t1 == hpet_readl(HPET_COUNTER)) {
printk(KERN_WARNING
"HPET counter not counting. HPET disabled\n");
return -ENODEV;


@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
* to just read by itself. So use jiffies to emulate a free
* running counter:
*/
static cycle_t pit_read(void)
static cycle_t pit_read(struct clocksource *cs)
{
static int old_count;
static u32 old_jifs;


@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
return ret;
}
static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
{
return kvm_clock_read();
}
/*
* If we don't do that, there is the possibility that the guest
* will calibrate under heavy load - thus, getting a lower lpj -
@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
static struct clocksource kvm_clock = {
.name = "kvm-clock",
.read = kvm_clock_read,
.read = kvm_clock_get_cycles,
.rating = 400,
.mask = CLOCKSOURCE_MASK(64),
.mult = 1 << KVM_SCALE,


@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
* code, which is necessary to support wrapping clocksources like pm
* timer.
*/
static cycle_t read_tsc(void)
static cycle_t read_tsc(struct clocksource *cs)
{
cycle_t ret = (cycle_t)get_cycles();


@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
/** vmi clocksource */
static struct clocksource clocksource_vmi;
static cycle_t read_real_cycles(void)
static cycle_t read_real_cycles(struct clocksource *cs)
{
cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
return max(ret, clocksource_vmi.cycle_last);


@ -663,7 +663,7 @@ static unsigned long lguest_tsc_khz(void)
/* If we can't use the TSC, the kernel falls back to our lower-priority
* "lguest_clock", where we read the time value given to us by the Host. */
static cycle_t lguest_clock_read(void)
static cycle_t lguest_clock_read(struct clocksource *cs)
{
unsigned long sec, nsec;


@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
return ret;
}
static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
return xen_clocksource_read();
}
static void xen_read_wallclock(struct timespec *ts)
{
struct shared_info *s = HYPERVISOR_shared_info;
@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
static struct clocksource xen_clocksource __read_mostly = {
.name = "xen",
.rating = 400,
.read = xen_clocksource_read,
.read = xen_clocksource_get_cycles,
.mask = ~0,
.mult = 1<<XEN_SHIFT, /* time directly in nanoseconds */
.shift = XEN_SHIFT,


@ -300,9 +300,9 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
static struct platform_suspend_ops acpi_suspend_ops = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin,
.prepare = acpi_pm_prepare,
.prepare_late = acpi_pm_prepare,
.enter = acpi_suspend_enter,
.finish = acpi_pm_finish,
.wake = acpi_pm_finish,
.end = acpi_pm_end,
};
@ -328,9 +328,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
static struct platform_suspend_ops acpi_suspend_ops_old = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin_old,
.prepare = acpi_pm_disable_gpes,
.prepare_late = acpi_pm_disable_gpes,
.enter = acpi_suspend_enter,
.finish = acpi_pm_finish,
.wake = acpi_pm_finish,
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};


@ -891,7 +891,8 @@ int device_add(struct device *dev)
set_dev_node(dev, dev_to_node(parent));
/* first, register with generic layer. */
error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev_name(dev));
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
if (error)
goto Error;


@ -179,6 +179,7 @@ void wait_for_device_probe(void)
wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
/**
* driver_probe_device - attempt to bind device & driver together


@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m
int i, ret = -ENOMEM;
for (i = 0; i < num_pages; i++) {
page = alloc_page(GFP_KERNEL | GFP_DMA32);
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
/* agp_free_memory() needs gart address */
if (page == NULL)
goto out;
@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
struct page * page;
page = alloc_page(GFP_KERNEL | GFP_DMA32);
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return NULL;


@ -72,7 +72,7 @@ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;
static cycle_t read_hpet(void)
static cycle_t read_hpet(struct clocksource *cs)
{
return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}


@ -285,6 +285,11 @@ enum ipmi_stat_indexes {
/* Events that were received with the proper format. */
IPMI_STAT_events,
/* Retransmissions on IPMB that failed. */
IPMI_STAT_dropped_rexmit_ipmb_commands,
/* Retransmissions on LAN that failed. */
IPMI_STAT_dropped_rexmit_lan_commands,
/* This *must* remain last, add new values above this. */
IPMI_NUM_STATS
@ -445,6 +450,20 @@ static DEFINE_MUTEX(smi_watchers_mutex);
#define ipmi_get_stat(intf, stat) \
((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
static int is_lan_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}
static int is_ipmb_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}
static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
static void free_recv_msg_list(struct list_head *q)
{
@ -601,8 +620,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
return (smi_addr1->lun == smi_addr2->lun);
}
if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
|| (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
struct ipmi_ipmb_addr *ipmb_addr1
= (struct ipmi_ipmb_addr *) addr1;
struct ipmi_ipmb_addr *ipmb_addr2
@ -612,7 +630,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
&& (ipmb_addr1->lun == ipmb_addr2->lun));
}
if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
if (is_lan_addr(addr1)) {
struct ipmi_lan_addr *lan_addr1
= (struct ipmi_lan_addr *) addr1;
struct ipmi_lan_addr *lan_addr2
@ -644,14 +662,13 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
|| (addr->channel < 0))
return -EINVAL;
if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
|| (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
if (len < sizeof(struct ipmi_ipmb_addr))
return -EINVAL;
return 0;
}
if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
if (is_lan_addr(addr)) {
if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
return 0;
@ -1503,8 +1520,7 @@ static int i_ipmi_request(ipmi_user_t user,
memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
ipmi_inc_stat(intf, sent_local_commands);
} else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
|| (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
struct ipmi_ipmb_addr *ipmb_addr;
unsigned char ipmb_seq;
long seqid;
@ -1583,8 +1599,6 @@ static int i_ipmi_request(ipmi_user_t user,
spin_lock_irqsave(&(intf->seq_lock), flags);
ipmi_inc_stat(intf, sent_ipmb_commands);
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
@ -1606,6 +1620,8 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
ipmi_inc_stat(intf, sent_ipmb_commands);
/*
* Store the sequence number in the message,
* so that when the send message response
@ -1635,7 +1651,7 @@ static int i_ipmi_request(ipmi_user_t user,
*/
spin_unlock_irqrestore(&(intf->seq_lock), flags);
}
} else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
} else if (is_lan_addr(addr)) {
struct ipmi_lan_addr *lan_addr;
unsigned char ipmb_seq;
long seqid;
@ -1696,8 +1712,6 @@ static int i_ipmi_request(ipmi_user_t user,
spin_lock_irqsave(&(intf->seq_lock), flags);
ipmi_inc_stat(intf, sent_lan_commands);
/*
* Create a sequence number with a 1 second
* timeout and 4 retries.
@ -1719,6 +1733,8 @@ static int i_ipmi_request(ipmi_user_t user,
goto out_err;
}
ipmi_inc_stat(intf, sent_lan_commands);
/*
* Store the sequence number in the message,
* so that when the send message response
@ -1937,6 +1953,10 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
ipmi_get_stat(intf, invalid_events));
out += sprintf(out, "events: %u\n",
ipmi_get_stat(intf, events));
out += sprintf(out, "failed rexmit LAN msgs: %u\n",
ipmi_get_stat(intf, dropped_rexmit_lan_commands));
out += sprintf(out, "failed rexmit IPMB msgs: %u\n",
ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
return (out - ((char *) page));
}
@ -3264,6 +3284,114 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
return rv;
}
/*
* This routine will handle "Get Message" command responses with
* channels that use an OEM Medium. The message format belongs to
* the OEM. See IPMI 2.0 specification, Chapter 6 and
* Chapter 22, sections 22.6 and 22.24 for more details.
*/
static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
struct cmd_rcvr *rcvr;
int rv = 0;
unsigned char netfn;
unsigned char cmd;
unsigned char chan;
ipmi_user_t user = NULL;
struct ipmi_system_interface_addr *smi_addr;
struct ipmi_recv_msg *recv_msg;
/*
* We expect the OEM SW to perform error checking
* so we just do some basic sanity checks
*/
if (msg->rsp_size < 4) {
/* Message not big enough, just ignore it. */
ipmi_inc_stat(intf, invalid_commands);
return 0;
}
if (msg->rsp[2] != 0) {
/* An error getting the response, just ignore it. */
return 0;
}
/*
* This is an OEM Message so the OEM needs to know how
* to handle the message. We do no interpretation.
*/
netfn = msg->rsp[0] >> 2;
cmd = msg->rsp[1];
chan = msg->rsp[3] & 0xf;
rcu_read_lock();
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
kref_get(&user->refcount);
} else
user = NULL;
rcu_read_unlock();
if (user == NULL) {
/* We didn't find a user, just give up. */
ipmi_inc_stat(intf, unhandled_commands);
/*
* Don't do anything with these messages, just allow
* them to be freed.
*/
rv = 0;
} else {
/* Deliver the message to the user. */
ipmi_inc_stat(intf, handled_commands);
recv_msg = ipmi_alloc_recv_msg();
if (!recv_msg) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
* later.
*/
rv = 1;
kref_put(&user->refcount, free_user);
} else {
/*
* OEM Messages are expected to be delivered via
* the system interface to SMS software. We might
* need to visit this again depending on OEM
* requirements
*/
smi_addr = ((struct ipmi_system_interface_addr *)
&(recv_msg->addr));
smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr->channel = IPMI_BMC_CHANNEL;
smi_addr->lun = msg->rsp[0] & 3;
recv_msg->user = user;
recv_msg->user_msg_data = NULL;
recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
recv_msg->msg.netfn = msg->rsp[0] >> 2;
recv_msg->msg.cmd = msg->rsp[1];
recv_msg->msg.data = recv_msg->msg_data;
/*
* The message starts at byte 4, which follows the
* Channel Byte in the "GET MESSAGE" command
*/
recv_msg->msg.data_len = msg->rsp_size - 4;
memcpy(recv_msg->msg_data,
&(msg->rsp[4]),
msg->rsp_size - 4);
deliver_response(recv_msg);
}
}
return rv;
}
static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
struct ipmi_smi_msg *msg)
{
@ -3519,6 +3647,17 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
goto out;
}
/*
** We need to make sure the channels have been initialized.
** The channel_handler routine will set the "curr_channel"
** equal to or greater than IPMI_MAX_CHANNELS when all the
** channels for this interface have been initialized.
*/
if (intf->curr_channel < IPMI_MAX_CHANNELS) {
requeue = 1; /* Just put the message back for now */
goto out;
}
switch (intf->channels[chan].medium) {
case IPMI_CHANNEL_MEDIUM_IPMB:
if (msg->rsp[4] & 0x04) {
@ -3554,11 +3693,20 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
break;
default:
/*
* We don't handle the channel type, so just
* free the message.
*/
requeue = 0;
/* Check for OEM Channels. Clients had better
register for these commands. */
if ((intf->channels[chan].medium
>= IPMI_CHANNEL_MEDIUM_OEM_MIN)
&& (intf->channels[chan].medium
<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
requeue = handle_oem_get_msg_cmd(intf, msg);
} else {
/*
* We don't handle the channel type, so just
* free the message.
*/
requeue = 0;
}
}
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
@ -3730,7 +3878,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
list_add_tail(&msg->link, timeouts);
if (ent->broadcast)
ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
else if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf, timed_out_lan_commands);
else
ipmi_inc_stat(intf, timed_out_ipmb_commands);
@ -3744,15 +3892,17 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
*/
ent->timeout = MAX_MSG_TIMEOUT;
ent->retries_left--;
if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
ipmi_inc_stat(intf, retransmitted_lan_commands);
else
ipmi_inc_stat(intf, retransmitted_ipmb_commands);
smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
ent->seqid);
if (!smi_msg)
if (!smi_msg) {
if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf,
dropped_rexmit_lan_commands);
else
ipmi_inc_stat(intf,
dropped_rexmit_ipmb_commands);
return;
}
spin_unlock_irqrestore(&intf->seq_lock, *flags);
@ -3764,10 +3914,17 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
* resent.
*/
handlers = intf->handlers;
if (handlers)
if (handlers) {
if (is_lan_addr(&ent->recv_msg->addr))
ipmi_inc_stat(intf,
retransmitted_lan_commands);
else
ipmi_inc_stat(intf,
retransmitted_ipmb_commands);
intf->handlers->sender(intf->send_info,
smi_msg, 0);
else
} else
ipmi_free_smi_msg(smi_msg);
spin_lock_irqsave(&intf->seq_lock, *flags);


@ -82,12 +82,6 @@
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
short timeout */
/* Bit for BMC global enables. */
#define IPMI_BMC_RCV_MSG_INTR 0x01
#define IPMI_BMC_EVT_MSG_INTR 0x02
#define IPMI_BMC_EVT_MSG_BUFF 0x04
#define IPMI_BMC_SYS_LOG 0x08
enum si_intf_state {
SI_NORMAL,
SI_GETTING_FLAGS,
@ -220,6 +214,9 @@ struct smi_info {
OEM2_DATA_AVAIL)
unsigned char msg_flags;
/* Does the BMC have an event buffer? */
char has_event_buffer;
/*
* If set to true, this will request events the next time the
* state machine is idle.
@ -968,7 +965,8 @@ static void request_events(void *send_info)
{
struct smi_info *smi_info = send_info;
if (atomic_read(&smi_info->stop_operation))
if (atomic_read(&smi_info->stop_operation) ||
!smi_info->has_event_buffer)
return;
atomic_set(&smi_info->req_events, 1);
@ -2407,26 +2405,9 @@ static struct of_platform_driver ipmi_of_platform_driver = {
};
#endif /* CONFIG_PPC_OF */
static int try_get_dev_id(struct smi_info *smi_info)
static int wait_for_msg_done(struct smi_info *smi_info)
{
unsigned char msg[2];
unsigned char *resp;
unsigned long resp_len;
enum si_sm_result smi_result;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
/*
* Do a Get Device ID command, since it comes back with some
* useful info.
*/
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_DEVICE_ID_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
for (;;) {
@ -2441,16 +2422,39 @@ static int try_get_dev_id(struct smi_info *smi_info)
} else
break;
}
if (smi_result == SI_SM_HOSED) {
if (smi_result == SI_SM_HOSED)
/*
* We couldn't get the state machine to run, so whatever's at
* the port is probably not an IPMI SMI interface.
*/
rv = -ENODEV;
goto out;
}
return -ENODEV;
return 0;
}
static int try_get_dev_id(struct smi_info *smi_info)
{
unsigned char msg[2];
unsigned char *resp;
unsigned long resp_len;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
/*
* Do a Get Device ID command, since it comes back with some
* useful info.
*/
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_DEVICE_ID_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv)
goto out;
/* Otherwise, we got some data. */
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
@ -2462,6 +2466,88 @@ static int try_get_dev_id(struct smi_info *smi_info)
return rv;
}
static int try_enable_event_buffer(struct smi_info *smi_info)
{
unsigned char msg[3];
unsigned char *resp;
unsigned long resp_len;
int rv = 0;
resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
if (!resp)
return -ENOMEM;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
rv = wait_for_msg_done(smi_info);
if (rv) {
printk(KERN_WARNING
"ipmi_si: Error getting response from get global,"
" enables command, the event buffer is not"
" enabled.\n");
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 4 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
printk(KERN_WARNING
"ipmi_si: Invalid return from get global"
" enables command, cannot enable the event"
" buffer.\n");
rv = -EINVAL;
goto out;
}
if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
/* buffer is already enabled, nothing to do. */
goto out;
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
rv = wait_for_msg_done(smi_info);
if (rv) {
printk(KERN_WARNING
"ipmi_si: Error getting response from set global,"
" enables command, the event buffer is not"
" enabled.\n");
goto out;
}
resp_len = smi_info->handlers->get_result(smi_info->si_sm,
resp, IPMI_MAX_MSG_LENGTH);
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
printk(KERN_WARNING
"ipmi_si: Invalid return from get global,"
"enables command, not enable the event"
" buffer.\n");
rv = -EINVAL;
goto out;
}
if (resp[2] != 0)
/*
* An error when setting the event buffer bit means
* that the event buffer is not supported.
*/
rv = -ENOENT;
out:
kfree(resp);
return rv;
}
static int type_file_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
@ -2847,6 +2933,10 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->intf_num = smi_num;
smi_num++;
rv = try_enable_event_buffer(new_smi);
if (rv == 0)
new_smi->has_event_buffer = 1;
/*
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
@ -2863,7 +2953,7 @@ static int try_smi_init(struct smi_info *new_smi)
*/
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
if (rv) {
if (!new_smi->pdev) {
printk(KERN_ERR
"ipmi_si_intf:"
" Unable to allocate platform device\n");


@ -57,7 +57,7 @@ u32 acpi_pm_read_verified(void)
return v2;
}
static cycle_t acpi_pm_read(void)
static cycle_t acpi_pm_read(struct clocksource *cs)
{
return (cycle_t)read_pmtmr();
}
@ -83,7 +83,7 @@ static int __init acpi_pm_good_setup(char *__str)
}
__setup("acpi_pm_good", acpi_pm_good_setup);
static cycle_t acpi_pm_read_slow(void)
static cycle_t acpi_pm_read_slow(struct clocksource *cs)
{
return (cycle_t)acpi_pm_read_verified();
}
@ -156,9 +156,9 @@ static int verify_pmtmr_rate(void)
unsigned long count, delta;
mach_prepare_counter();
value1 = clocksource_acpi_pm.read();
value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
mach_countup(&count);
value2 = clocksource_acpi_pm.read();
value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
delta = (value2 - value1) & ACPI_PM_MASK;
/* Check that the PMTMR delta is within 5% of what we expect */
@ -195,9 +195,9 @@ static int __init init_acpi_pm_clocksource(void)
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
udelay(100 * j);
value1 = clocksource_acpi_pm.read();
value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
value2 = clocksource_acpi_pm.read();
value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
if (value2 == value1)
continue;
if (value2 > value1)


@ -19,7 +19,7 @@
int use_cyclone = 0;
static void __iomem *cyclone_ptr;
static cycle_t read_cyclone(void)
static cycle_t read_cyclone(struct clocksource *cs)
{
return (cycle_t)readl(cyclone_ptr);
}


@ -43,7 +43,7 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)");
/* The base timer frequency, * 27 if selected */
#define HRT_FREQ 1000000
static cycle_t read_hrt(void)
static cycle_t read_hrt(struct clocksource *cs)
{
/* Read the timer value */
return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET);


@ -39,7 +39,7 @@
static void __iomem *tcaddr;
static cycle_t tc_get_cycles(void)
static cycle_t tc_get_cycles(struct clocksource *cs)
{
unsigned long flags;
u32 lower, upper;


@ -674,7 +674,7 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
int row_index;
err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
if (err_detect)
if (!err_detect)
return;
mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",


@ -159,6 +159,9 @@ void drm_master_put(struct drm_master **master)
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (file_priv->is_master)
return 0;
if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
return -EINVAL;
@ -169,6 +172,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
file_priv->minor->master != file_priv->master) {
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
mutex_unlock(&dev->struct_mutex);
}
@ -178,10 +182,15 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (!file_priv->master)
if (!file_priv->is_master)
return -EINVAL;
if (!file_priv->minor->master)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
return 0;
}


@ -132,6 +132,7 @@ void drm_sysfs_destroy(void)
*/
static void drm_sysfs_device_release(struct device *dev)
{
memset(dev, 0, sizeof(struct device));
return;
}


@ -481,11 +481,13 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
{
int count = 10000000;
while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
;
while (count-- && (VIA_READ(VIA_REG_STATUS) &
while (count && (VIA_READ(VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY))) ;
VIA_3D_ENG_BUSY)))
--count;
return count;
}
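
The switch from post- to pre-decrement matters for the return value: on timeout the post-decrement form exits the loop with count at -1, which a caller testing the returned count for nonzero would misread as success, while the pre-decrement form exits with exactly 0. A minimal illustration, not driver code:

/* Illustration of the off-by-one fixed above (not driver code): */
int done = 0, count;

count = 1;
while (!done && count--)
	;	/* times out with count == -1: nonzero, looks like success */

count = 1;
while (!done && --count)
	;	/* times out with count == 0: correctly reported as failure */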
@ -705,7 +707,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
switch (d_siz->func) {
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& count--) {
&& --count) {
if (!d_siz->wait) {
break;
}
@ -717,7 +719,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
break;
case VIA_CMDBUF_LAG:
while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
&& count--) {
&& --count) {
if (!d_siz->wait) {
break;
}


@ -819,6 +819,7 @@ static const struct parisc_device_id hp_sdc_tbl[] = {
MODULE_DEVICE_TABLE(parisc, hp_sdc_tbl);
static int __init hp_sdc_init_hppa(struct parisc_device *d);
static struct delayed_work moduleloader_work;
static struct parisc_driver hp_sdc_driver = {
.name = "hp_sdc",
@ -930,8 +931,15 @@ static int __init hp_sdc_init(void)
#if defined(__hppa__)
static void request_module_delayed(struct work_struct *work)
{
request_module("hp_sdc_mlc");
}
static int __init hp_sdc_init_hppa(struct parisc_device *d)
{
int ret;
if (!d)
return 1;
if (hp_sdc.dev != NULL)
@ -944,13 +952,26 @@ static int __init hp_sdc_init_hppa(struct parisc_device *d)
hp_sdc.data_io = d->hpa.start + 0x800;
hp_sdc.status_io = d->hpa.start + 0x801;
return hp_sdc_init();
INIT_DELAYED_WORK(&moduleloader_work, request_module_delayed);
ret = hp_sdc_init();
/* after successful initialization give SDC some time to settle
* and then load the hp_sdc_mlc upper layer driver */
if (!ret)
schedule_delayed_work(&moduleloader_work,
msecs_to_jiffies(2000));
return ret;
}
#endif /* __hppa__ */
static void hp_sdc_exit(void)
{
/* do nothing if we don't have an SDC */
if (!hp_sdc.dev)
return;
write_lock_irq(&hp_sdc.lock);
/* Turn off all maskable "sub-function" irq's. */
@ -969,6 +990,7 @@ static void hp_sdc_exit(void)
tasklet_kill(&hp_sdc.task);
#if defined(__hppa__)
cancel_delayed_work_sync(&moduleloader_work);
if (unregister_parisc_driver(&hp_sdc_driver))
printk(KERN_WARNING PREFIX "Error unregistering HP SDC");
#endif


@ -1479,6 +1479,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
s += blocks;
}
bitmap->last_end_sync = jiffies;
sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
@ -1589,7 +1590,7 @@ void bitmap_destroy(mddev_t *mddev)
int bitmap_create(mddev_t *mddev)
{
struct bitmap *bitmap;
unsigned long blocks = mddev->resync_max_sectors;
sector_t blocks = mddev->resync_max_sectors;
unsigned long chunks;
unsigned long pages;
struct file *file = mddev->bitmap_file;
@ -1631,8 +1632,8 @@ int bitmap_create(mddev_t *mddev)
bitmap->chunkshift = ffz(~bitmap->chunksize);
/* now that chunksize and chunkshift are set, we can use these macros */
chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
CHUNK_BLOCK_RATIO(bitmap);
chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
CHUNK_BLOCK_SHIFT(bitmap);
pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
BUG_ON(!pages);


@ -2017,6 +2017,8 @@ static void md_update_sb(mddev_t * mddev, int force_change)
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
@ -2086,6 +2088,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
* -writemostly - clears write_mostly
* blocked - sets the Blocked flag
* -blocked - clears the Blocked flag
* insync - sets Insync providing device isn't active
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@ -2117,6 +2120,9 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
}
if (!err && rdev->sysfs_state)
@ -2190,7 +2196,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
} else if (rdev->mddev->pers) {
mdk_rdev_t *rdev2;
/* Activating a spare .. or possibly reactivating
* if we every get bitmaps working here.
* if we ever get bitmaps working here.
*/
if (rdev->raid_disk != -1)
@ -3482,12 +3488,15 @@ sync_completed_show(mddev_t *mddev, char *page)
{
unsigned long max_sectors, resync;
if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;
resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
resync = mddev->curr_resync_completed;
return sprintf(page, "%lu / %lu\n", resync, max_sectors);
}
@ -6334,18 +6343,13 @@ void md_do_sync(mddev_t *mddev)
sector_t sectors;
skipped = 0;
if (j >= mddev->resync_max) {
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wait_event(mddev->recovery_wait,
mddev->resync_max > j
|| kthread_should_stop());
}
if (kthread_should_stop())
goto interrupted;
if (mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) {
if ((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
) {
/* time to update curr_resync_completed */
blk_unplug(mddev->queue);
wait_event(mddev->recovery_wait,
@ -6353,7 +6357,17 @@ void md_do_sync(mddev_t *mddev)
mddev->curr_resync_completed =
mddev->curr_resync;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
if (j >= mddev->resync_max)
wait_event(mddev->recovery_wait,
mddev->resync_max > j
|| kthread_should_stop());
if (kthread_should_stop())
goto interrupted;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {
@ -6461,6 +6475,7 @@ void md_do_sync(mddev_t *mddev)
skip:
mddev->curr_resync = 0;
mddev->curr_resync_completed = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");


@ -12,10 +12,17 @@
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _MD_K_H
#define _MD_K_H
#ifndef _MD_MD_H
#define _MD_MD_H
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#define MaxSector (~(sector_t)0)
@ -408,10 +415,6 @@ static inline void safe_put_page(struct page *p)
if (p) put_page(p);
}
#endif /* CONFIG_BLOCK */
#endif
extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
@ -434,3 +437,5 @@ extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
#endif /* _MD_MD_H */


@ -3845,6 +3845,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes)==0);
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = mddev->curr_resync;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
@ -3854,6 +3855,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
if (mddev->delta_disks < 0) {
@ -3938,11 +3940,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* then we need to write out the superblock.
*/
sector_nr += reshape_sectors;
if (sector_nr >= mddev->resync_max) {
if ((sector_nr - mddev->curr_resync_completed) * 2
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes) == 0);
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = mddev->curr_resync;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);
@ -3953,6 +3957,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
return reshape_sectors;
}

View file

@ -5934,7 +5934,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
/* Initialize the timer
*/
init_timer(&pCfg->timer);
init_timer_on_stack(&pCfg->timer);
pCfg->timer.data = (unsigned long) ioc;
pCfg->timer.function = mpt_timer_expired;
pCfg->wait_done = 0;
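
For context, init_timer_on_stack() matters here because pCfg->timer is an on-stack object: with CONFIG_DEBUG_OBJECTS_TIMERS enabled, plain init_timer() on stack memory trips the debug-object tracking. A hedged sketch of the general pattern; the destroy_timer_on_stack() pairing is assumed here and is not part of this hunk:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void stack_timer_pattern(void (*fn)(unsigned long), unsigned long data)
{
	struct timer_list t;

	init_timer_on_stack(&t);	/* register the stack object with debugobjects */
	t.function = fn;
	t.data = data;
	t.expires = jiffies + HZ;
	add_timer(&t);

	/* ... wait for the timeout or for the operation to complete ... */

	del_timer_sync(&t);
	destroy_timer_on_stack(&t);	/* tear the debug-object state down */
}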

View file

@ -375,7 +375,7 @@ static int __init gru_init(void)
void *gru_start_vaddr;
if (!is_uv_system())
return -ENODEV;
return 0;
#if defined CONFIG_IA64
gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */

View file

@ -248,19 +248,19 @@ xp_init(void)
enum xp_retval ret;
int ch_number;
/* initialize the connection registration mutex */
for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
mutex_init(&xpc_registrations[ch_number].mutex);
if (is_shub())
ret = xp_init_sn2();
else if (is_uv())
ret = xp_init_uv();
else
ret = xpUnsupported;
ret = 0;
if (ret != xpSuccess)
return -ENODEV;
/* initialize the connection registration mutex */
for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
mutex_init(&xpc_registrations[ch_number].mutex);
return ret;
return 0;
}

View file

@ -573,7 +573,7 @@ config RTC_DRV_SA1100
config RTC_DRV_SH
tristate "SuperH On-Chip RTC"
depends on RTC_CLASS && SUPERH
depends on RTC_CLASS && SUPERH && HAVE_CLK
help
Say Y here to enable support for the on-chip RTC found in
most SuperH processors.

View file

@ -797,17 +797,15 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup2;
}
pr_info("%s: alarms up to one %s%s, %zd bytes nvram%s\n",
dev_name(&cmos_rtc.rtc->dev),
is_valid_irq(rtc_irq)
? (cmos_rtc.mon_alrm
? "year"
: (cmos_rtc.day_alrm
? "month" : "day"))
: "no",
cmos_rtc.century ? ", y3k" : "",
nvram.size,
is_hpet_enabled() ? ", hpet irqs" : "");
pr_info("%s: %s%s, %zd bytes nvram%s\n",
dev_name(&cmos_rtc.rtc->dev),
!is_valid_irq(rtc_irq) ? "no alarms" :
cmos_rtc.mon_alrm ? "alarms up to one year" :
cmos_rtc.day_alrm ? "alarms up to one month" :
"alarms up to one day",
cmos_rtc.century ? ", y3k" : "",
nvram.size,
is_hpet_enabled() ? ", hpet irqs" : "");
return 0;

View file

@ -1,7 +1,7 @@
/*
* SuperH On-Chip RTC Support
*
* Copyright (C) 2006, 2007, 2008 Paul Mundt
* Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2006 Jamie Lenehan
* Copyright (C) 2008 Angelo Castello
*
@ -25,10 +25,11 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/clk.h>
#include <asm/rtc.h>
#define DRV_NAME "sh-rtc"
#define DRV_VERSION "0.2.1"
#define DRV_VERSION "0.2.2"
#define RTC_REG(r) ((r) * rtc_reg_size)
@ -87,16 +88,17 @@
#define RCR2_START 0x01 /* Start bit */
struct sh_rtc {
void __iomem *regbase;
unsigned long regsize;
struct resource *res;
int alarm_irq;
int periodic_irq;
int carry_irq;
struct rtc_device *rtc_dev;
spinlock_t lock;
unsigned long capabilities; /* See asm-sh/rtc.h for cap bits */
unsigned short periodic_freq;
void __iomem *regbase;
unsigned long regsize;
struct resource *res;
int alarm_irq;
int periodic_irq;
int carry_irq;
struct clk *clk;
struct rtc_device *rtc_dev;
spinlock_t lock;
unsigned long capabilities; /* See asm/rtc.h for cap bits */
unsigned short periodic_freq;
};
static int __sh_rtc_interrupt(struct sh_rtc *rtc)
@ -294,10 +296,10 @@ static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
tmp = readb(rtc->regbase + RCR1);
if (!enable)
tmp &= ~RCR1_AIE;
else
if (enable)
tmp |= RCR1_AIE;
else
tmp &= ~RCR1_AIE;
writeb(tmp, rtc->regbase + RCR1);
@ -618,6 +620,7 @@ static int sh_rtc_irq_set_freq(struct device *dev, int freq)
{
if (!is_power_of_2(freq))
return -EINVAL;
return sh_rtc_ioctl(dev, RTC_IRQP_SET, freq);
}
@ -637,7 +640,8 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
struct sh_rtc *rtc;
struct resource *res;
struct rtc_time r;
int ret;
char clk_name[6];
int clk_id, ret;
rtc = kzalloc(sizeof(struct sh_rtc), GFP_KERNEL);
if (unlikely(!rtc))
@ -652,6 +656,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No IRQ resource\n");
goto err_badres;
}
rtc->periodic_irq = ret;
rtc->carry_irq = platform_get_irq(pdev, 1);
rtc->alarm_irq = platform_get_irq(pdev, 2);
@ -663,7 +668,7 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
goto err_badres;
}
rtc->regsize = res->end - res->start + 1;
rtc->regsize = resource_size(res);
rtc->res = request_mem_region(res->start, rtc->regsize, pdev->name);
if (unlikely(!rtc->res)) {
@ -677,6 +682,26 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
goto err_badmap;
}
clk_id = pdev->id;
/* With a single device, the clock id is still "rtc0" */
if (clk_id < 0)
clk_id = 0;
snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id);
rtc->clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(rtc->clk)) {
/*
* Intentionally no error handling for rtc->clk: not all
* platforms will have a unique clock for the RTC, and
* the clk API can handle the struct clk pointer being
* NULL.
*/
rtc->clk = NULL;
}
clk_enable(rtc->clk);
rtc->rtc_dev = rtc_device_register("sh", &pdev->dev,
&sh_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtc_dev)) {
@ -759,6 +784,8 @@ static int __devinit sh_rtc_probe(struct platform_device *pdev)
return 0;
err_unmap:
clk_disable(rtc->clk);
clk_put(rtc->clk);
iounmap(rtc->regbase);
err_badmap:
release_resource(rtc->res);
@ -780,6 +807,7 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev)
sh_rtc_setcie(&pdev->dev, 0);
free_irq(rtc->periodic_irq, rtc);
if (rtc->carry_irq > 0) {
free_irq(rtc->carry_irq, rtc);
free_irq(rtc->alarm_irq, rtc);
@ -789,6 +817,9 @@ static int __devexit sh_rtc_remove(struct platform_device *pdev)
iounmap(rtc->regbase);
clk_disable(rtc->clk);
clk_put(rtc->clk);
platform_set_drvdata(pdev, NULL);
kfree(rtc);
@ -802,11 +833,11 @@ static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
struct sh_rtc *rtc = platform_get_drvdata(pdev);
set_irq_wake(rtc->periodic_irq, enabled);
if (rtc->carry_irq > 0) {
set_irq_wake(rtc->carry_irq, enabled);
set_irq_wake(rtc->alarm_irq, enabled);
}
}
static int sh_rtc_suspend(struct device *dev)
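
The clk_get() fallback above leans on the clk API tolerating a NULL struct clk, as the comment in probe notes. A condensed sketch of that optional-clock idiom; "rtc0" mirrors the id the probe path builds from pdev->id:

#include <linux/clk.h>
#include <linux/err.h>

static struct clk *sh_rtc_get_optional_clk(struct device *dev)
{
	struct clk *clk = clk_get(dev, "rtc0");

	if (IS_ERR(clk))
		clk = NULL;	/* no dedicated RTC clock on this platform;
				 * clk_enable(NULL)/clk_disable(NULL) are
				 * handled gracefully by the clk API */
	return clk;
}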

View file

@ -180,8 +180,6 @@ int scsi_complete_async_scans(void)
spin_unlock(&async_scan_lock);
kfree(data);
/* Synchronize async operations globally */
async_synchronize_full();
return 0;
}

View file

@ -11,10 +11,21 @@
*/
#include <linux/module.h>
#include <linux/device.h>
#include <scsi/scsi_scan.h>
static int __init wait_scan_init(void)
{
/*
* First we need to wait for device probing to finish;
* the drivers we just loaded might still be probing
* and might not yet have reached the SCSI async scanning
*/
wait_for_device_probe();
/*
* and then we wait for the actual asynchronous scsi scan
* to finish.
*/
scsi_complete_async_scans();
return 0;
}

View file

@ -166,7 +166,7 @@ static void bfin_serial_start_tx(struct uart_port *port)
struct tty_struct *tty = uart->port.info->port.tty;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->scts && (!bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
@ -368,7 +368,7 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
struct bfin_serial_port *uart = dev_id;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->scts && (!bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
@ -504,7 +504,7 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
struct circ_buf *xmit = &uart->port.info->xmit;
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->scts && (!bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
uart->scts = 0;
uart_handle_cts_change(&uart->port, uart->scts);
}
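
All three hunks in this file fix the same operator-precedence bug: ! binds tighter than &, so the old form masks a 0-or-1 boolean with the CTS bit and can never be true. A standalone illustration (the mctrl values are made up):

#include <stdio.h>

#define TIOCM_CTS 0x020

int main(void)
{
	unsigned int mctrl;

	for (mctrl = 0; mctrl <= TIOCM_CTS; mctrl += TIOCM_CTS) {
		/* old form: (!mctrl) is 0 or 1, neither of which has
		 * bit 0x020 set, so the test can never fire */
		printf("mctrl=%#x  !m & CTS = %d   !(m & CTS) = %d\n",
		       mctrl, !mctrl & TIOCM_CTS, !(mctrl & TIOCM_CTS));
	}
	return 0;
}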

View file

@ -213,7 +213,7 @@ static int flush(struct driver_data *drv_data)
while (read_SSSR(reg) & SSSR_RNE) {
read_SSDR(reg);
}
} while ((read_SSSR(reg) & SSSR_BSY) && limit--);
} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
write_SSSR(SSSR_ROR, reg);
return limit;
@ -484,7 +484,7 @@ static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
unsigned long limit = loops_per_jiffy << 1;
while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--)
while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
cpu_relax();
return limit;
@ -494,7 +494,7 @@ static int wait_dma_channel_stop(int channel)
{
unsigned long limit = loops_per_jiffy << 1;
while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--)
while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
cpu_relax();
return limit;
@ -1700,6 +1700,13 @@ static int pxa2xx_spi_resume(struct platform_device *pdev)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
if (drv_data->rx_channel != -1)
DRCMR(drv_data->ssp->drcmr_rx) =
DRCMR_MAPVLD | drv_data->rx_channel;
if (drv_data->tx_channel != -1)
DRCMR(drv_data->ssp->drcmr_tx) =
DRCMR_MAPVLD | drv_data->tx_channel;
/* Enable the SSP clock */
clk_enable(ssp->clk);

View file

@ -191,8 +191,10 @@ int go7007_reset_encoder(struct go7007 *go)
/*
* Attempt to instantiate an I2C client by ID, probably loading a module.
*/
static int init_i2c_module(struct i2c_adapter *adapter, int id, int addr)
static int init_i2c_module(struct i2c_adapter *adapter, const char *type,
int id, int addr)
{
struct i2c_board_info info;
char *modname;
switch (id) {
@ -226,7 +228,11 @@ static int init_i2c_module(struct i2c_adapter *adapter, int id, int addr)
}
if (modname != NULL)
request_module(modname);
if (wis_i2c_probe_device(adapter, id, addr) == 1)
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
strlcpy(info.type, type, I2C_NAME_SIZE);
if (!i2c_new_device(adapter, &info))
return 0;
if (modname != NULL)
printk(KERN_INFO
@ -266,6 +272,7 @@ int go7007_register_encoder(struct go7007 *go)
if (go->i2c_adapter_online) {
for (i = 0; i < go->board_info->num_i2c_devs; ++i)
init_i2c_module(&go->i2c_adapter,
go->board_info->i2c_devs[i].type,
go->board_info->i2c_devs[i].id,
go->board_info->i2c_devs[i].addr);
if (go->board_id == GO7007_BOARDID_ADLINK_MPG24)

View file

@ -31,87 +31,6 @@
#include "go7007-priv.h"
#include "wis-i2c.h"
/************** Registration interface for I2C client drivers **************/
/* Since there's no way to auto-probe the I2C devices connected to the I2C
* bus on the go7007, we have this silly little registration system that
* client drivers can use to register their I2C driver ID and their
* detect_client function (the one that's normally passed to i2c_probe).
*
* When a new go7007 device is connected, we can look up in a board info
* table by the USB or PCI vendor/product/revision ID to determine
* which I2C client module to load. The client driver module will register
* itself here, and then we can call the registered detect_client function
* to force-load a new client at the address listed in the board info table.
*
* Really the I2C subsystem should have a way to force-load I2C client
* drivers when we have a priori knowledge of what's on the bus, especially
* since the existing I2C auto-probe mechanism is so hokey, but we'll use
* our own mechanism for the time being. */
struct wis_i2c_client_driver {
unsigned int id;
found_proc found_proc;
struct list_head list;
};
static LIST_HEAD(i2c_client_drivers);
static DECLARE_MUTEX(i2c_client_driver_list_lock);
/* Client drivers register here by their I2C driver ID */
int wis_i2c_add_driver(unsigned int id, found_proc found_proc)
{
struct wis_i2c_client_driver *driver;
driver = kmalloc(sizeof(struct wis_i2c_client_driver), GFP_KERNEL);
if (driver == NULL)
return -ENOMEM;
driver->id = id;
driver->found_proc = found_proc;
down(&i2c_client_driver_list_lock);
list_add_tail(&driver->list, &i2c_client_drivers);
up(&i2c_client_driver_list_lock);
return 0;
}
EXPORT_SYMBOL(wis_i2c_add_driver);
void wis_i2c_del_driver(found_proc found_proc)
{
struct wis_i2c_client_driver *driver, *next;
down(&i2c_client_driver_list_lock);
list_for_each_entry_safe(driver, next, &i2c_client_drivers, list)
if (driver->found_proc == found_proc) {
list_del(&driver->list);
kfree(driver);
}
up(&i2c_client_driver_list_lock);
}
EXPORT_SYMBOL(wis_i2c_del_driver);
/* The main go7007 driver calls this to instantiate a client by driver
* ID and bus address, which are both stored in the board info table */
int wis_i2c_probe_device(struct i2c_adapter *adapter,
unsigned int id, int addr)
{
struct wis_i2c_client_driver *driver;
int found = 0;
if (addr < 0 || addr > 0x7f)
return -1;
down(&i2c_client_driver_list_lock);
list_for_each_entry(driver, &i2c_client_drivers, list)
if (driver->id == id) {
if (driver->found_proc(adapter, addr, 0) == 0)
found = 1;
break;
}
up(&i2c_client_driver_list_lock);
return found;
}
/********************* Driver for on-board I2C adapter *********************/
/* #define GO7007_I2C_DEBUG */
@ -287,9 +206,7 @@ static struct i2c_algorithm go7007_algo = {
static struct i2c_adapter go7007_adap_templ = {
.owner = THIS_MODULE,
.class = I2C_CLASS_TV_ANALOG,
.name = "WIS GO7007SB",
.id = I2C_ALGO_GO7007,
.algo = &go7007_algo,
};

Some files were not shown because too many files have changed in this diff.