Merge branch 'master'

commit 5a476deff3
59 changed files with 501 additions and 567 deletions
@@ -1618,6 +1618,13 @@ M:	vandrove@vc.cvut.cz
 L:	linux-fbdev-devel@lists.sourceforge.net
 S:	Maintained
 
+MEGARAID SCSI DRIVERS
+P:	Neela Syam Kolli
+M:	Neela.Kolli@engenio.com
+S:	linux-scsi@vger.kernel.org
+W:	http://megaraid.lsilogic.com
+S:	Maintained
+
 MEMORY TECHNOLOGY DEVICES
 P:	David Woodhouse
 M:	dwmw2@infradead.org
Makefile | 4
@@ -660,8 +660,10 @@ quiet_cmd_sysmap = SYSMAP
 # Link of vmlinux
 # If CONFIG_KALLSYMS is set .version is already updated
 # Generate System.map and verify that the content is consistent
+# Use + in front of the vmlinux_version rule to silent warning with make -j2
+# First command is ':' to allow us to use + in front of the rule
 define rule_vmlinux__
+	:
 	$(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version))
 
 	$(call cmd,vmlinux__)
@@ -89,13 +89,6 @@ SECTIONS
 		*(.got)			/* Global offset table		*/
 	}
 
-	. = ALIGN(16);
-	__ex_table : {			/* Exception table		*/
-		__start___ex_table = .;
-		*(__ex_table)
-		__stop___ex_table = .;
-	}
-
 	RODATA
 
 	_etext = .;			/* End of text and rodata section	*/
@@ -137,6 +130,14 @@ SECTIONS
 		. = ALIGN(32);
 		*(.data.cacheline_aligned)
 
+		/*
+		 * The exception fixup table (might need resorting at runtime)
+		 */
+		. = ALIGN(32);
+		__start___ex_table = .;
+		*(__ex_table)
+		__stop___ex_table = .;
+
 		/*
 		 * and the usual data section
 		 */
@@ -7,11 +7,17 @@
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/device.h>
 
+#include <asm/types.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
 #include <asm/hardware.h>
 #include <asm/page.h>
 
+#include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/irq.h>
 
 /*
  * IRQ base register
@@ -48,6 +54,12 @@ static void l7200_unmask_irq(unsigned int irq)
 	IRQ_ENABLE = 1 << irq;
 }
 
+static struct irqchip l7200_irq_chip = {
+	.ack		= l7200_mask_irq,
+	.mask		= l7200_mask_irq,
+	.unmask		= l7200_unmask_irq
+};
+
 static void __init l7200_init_irq(void)
 {
 	int irq;
@@ -56,11 +68,9 @@ static void __init l7200_init_irq(void)
 	FIQ_ENABLECLEAR = 0xffffffff;	/* clear all fast interrupt enables */
 
 	for (irq = 0; irq < NR_IRQS; irq++) {
-		irq_desc[irq].valid	= 1;
-		irq_desc[irq].probe_ok	= 1;
-		irq_desc[irq].mask_ack	= l7200_mask_irq;
-		irq_desc[irq].mask	= l7200_mask_irq;
-		irq_desc[irq].unmask	= l7200_unmask_irq;
+		set_irq_chip(irq, &l7200_irq_chip);
+		set_irq_flags(irq, IRQF_VALID);
+		set_irq_handler(irq, do_level_IRQ);
 	}
 
 	init_FIQ();
@@ -467,6 +467,7 @@ void corgi_put_hsync(void)
 {
 	if (get_hsync_time)
 		symbol_put(w100fb_get_hsynclen);
+	get_hsync_time = NULL;
 }
 
 void corgi_wait_hsync(void)
@@ -476,20 +477,37 @@ void corgi_wait_hsync(void)
 #endif
 
 #ifdef CONFIG_PXA_SHARP_Cxx00
+static struct device *spitz_pxafb_dev;
+
+static int is_pxafb_device(struct device * dev, void * data)
+{
+	struct platform_device *pdev = container_of(dev, struct platform_device, dev);
+
+	return (strncmp(pdev->name, "pxa2xx-fb", 9) == 0);
+}
+
 unsigned long spitz_get_hsync_len(void)
 {
+	if (!spitz_pxafb_dev) {
+		spitz_pxafb_dev = bus_find_device(&platform_bus_type, NULL, NULL, is_pxafb_device);
+		if (!spitz_pxafb_dev)
+			return 0;
+	}
 	if (!get_hsync_time)
 		get_hsync_time = symbol_get(pxafb_get_hsync_time);
 	if (!get_hsync_time)
 		return 0;
 
-	return pxafb_get_hsync_time(&pxafb_device.dev);
+	return pxafb_get_hsync_time(spitz_pxafb_dev);
 }
 
 void spitz_put_hsync(void)
 {
+	put_device(spitz_pxafb_dev);
 	if (get_hsync_time)
 		symbol_put(pxafb_get_hsync_time);
+	spitz_pxafb_dev = NULL;
 	get_hsync_time = NULL;
 }
 
 void spitz_wait_hsync(void)
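For context, the lookup added above relies on the driver-core pattern of matching a device on a bus with a caller-supplied predicate. A minimal standalone sketch of that pattern, assuming 2.6-era driver-core APIs; "my_match" and "wanted_name" are illustrative names, not part of this commit:

	#include <linux/device.h>
	#include <linux/platform_device.h>
	#include <linux/string.h>

	static int my_match(struct device *dev, void *data)
	{
		struct platform_device *pdev = to_platform_device(dev);
		const char *wanted_name = data;

		return strcmp(pdev->name, wanted_name) == 0;
	}

	static struct device *find_platform_dev(const char *name)
	{
		/* bus_find_device() takes a reference on the returned
		 * device; the caller must drop it with put_device(),
		 * as spitz_put_hsync() does above. */
		return bus_find_device(&platform_bus_type, NULL,
				       (void *)name, my_match);
	}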
@@ -208,6 +208,11 @@ static struct platform_device pxafb_device = {
 	.resource	= pxafb_resources,
 };
 
+void __init set_pxa_fb_parent(struct device *parent_dev)
+{
+	pxafb_device.dev.parent = parent_dev;
+}
+
 static struct platform_device ffuart_device = {
 	.name		= "pxa2xx-uart",
 	.id		= 0,
@@ -36,7 +36,6 @@
 #include <asm/arch/irq.h>
 #include <asm/arch/mmc.h>
 #include <asm/arch/udc.h>
-#include <asm/arch/ohci.h>
 #include <asm/arch/pxafb.h>
 #include <asm/arch/akita.h>
 #include <asm/arch/spitz.h>
@@ -304,7 +303,6 @@ static struct platform_device *devices[] __initdata = {
 	&spitzkbd_device,
 	&spitzts_device,
 	&spitzbl_device,
-	&spitzbattery_device,
 };
 
 static void __init common_init(void)
@@ -328,7 +326,7 @@ static void __init common_init(void)
 
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 	pxa_set_mci_info(&spitz_mci_platform_data);
-	pxafb_device.dev.parent = &spitzssp_device.dev;
+	set_pxa_fb_parent(&spitzssp_device.dev);
 	set_pxa_fb_info(&spitz_pxafb_info);
 }
 
@@ -12,6 +12,7 @@ config MACH_ANUBIS
 config ARCH_BAST
 	bool "Simtec Electronics BAST (EB2410ITX)"
 	select CPU_S3C2410
+	select ISA
 	help
 	  Say Y here if you are using the Simtec Electronics EB2410ITX
 	  development board (also known as BAST)
@@ -275,12 +275,14 @@ static void flush_tlb_all_ipi(void *info)
  *==========================================================================*/
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
-	int cpu_id = smp_processor_id();
+	int cpu_id;
 	cpumask_t cpu_mask;
-	unsigned long *mmc = &mm->context[cpu_id];
+	unsigned long *mmc;
 	unsigned long flags;
 
 	preempt_disable();
+	cpu_id = smp_processor_id();
+	mmc = &mm->context[cpu_id];
 	cpu_mask = mm->cpu_vm_mask;
 	cpu_clear(cpu_id, cpu_mask);
 
@@ -343,12 +345,14 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int cpu_id = smp_processor_id();
+	int cpu_id;
 	cpumask_t cpu_mask;
-	unsigned long *mmc = &mm->context[cpu_id];
+	unsigned long *mmc;
 	unsigned long flags;
 
 	preempt_disable();
+	cpu_id = smp_processor_id();
+	mmc = &mm->context[cpu_id];
 	cpu_mask = mm->cpu_vm_mask;
 	cpu_clear(cpu_id, cpu_mask);
 
@@ -1,7 +1,7 @@
 /*
  *  fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups.
  *
- *  Copyright (C) 2002-2004  Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ *  Copyright (C) 2002-2005  Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 
+#include <asm/vr41xx/giu.h>
 #include <asm/vr41xx/tb0226.h>
 
 int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
@@ -29,42 +30,42 @@ int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 	switch (slot) {
 	case 12:
 		vr41xx_set_irq_trigger(GD82559_1_PIN,
-				       TRIGGER_LEVEL,
-				       SIGNAL_THROUGH);
-		vr41xx_set_irq_level(GD82559_1_PIN, LEVEL_LOW);
+				       IRQ_TRIGGER_LEVEL,
+				       IRQ_SIGNAL_THROUGH);
+		vr41xx_set_irq_level(GD82559_1_PIN, IRQ_LEVEL_LOW);
 		irq = GD82559_1_IRQ;
 		break;
 	case 13:
 		vr41xx_set_irq_trigger(GD82559_2_PIN,
-				       TRIGGER_LEVEL,
-				       SIGNAL_THROUGH);
-		vr41xx_set_irq_level(GD82559_2_PIN, LEVEL_LOW);
+				       IRQ_TRIGGER_LEVEL,
+				       IRQ_SIGNAL_THROUGH);
+		vr41xx_set_irq_level(GD82559_2_PIN, IRQ_LEVEL_LOW);
 		irq = GD82559_2_IRQ;
 		break;
 	case 14:
 		switch (pin) {
 		case 1:
 			vr41xx_set_irq_trigger(UPD720100_INTA_PIN,
-					       TRIGGER_LEVEL,
-					       SIGNAL_THROUGH);
+					       IRQ_TRIGGER_LEVEL,
+					       IRQ_SIGNAL_THROUGH);
 			vr41xx_set_irq_level(UPD720100_INTA_PIN,
-					     LEVEL_LOW);
+					     IRQ_LEVEL_LOW);
 			irq = UPD720100_INTA_IRQ;
 			break;
 		case 2:
 			vr41xx_set_irq_trigger(UPD720100_INTB_PIN,
-					       TRIGGER_LEVEL,
-					       SIGNAL_THROUGH);
+					       IRQ_TRIGGER_LEVEL,
+					       IRQ_SIGNAL_THROUGH);
 			vr41xx_set_irq_level(UPD720100_INTB_PIN,
-					     LEVEL_LOW);
+					     IRQ_LEVEL_LOW);
 			irq = UPD720100_INTB_IRQ;
 			break;
 		case 3:
 			vr41xx_set_irq_trigger(UPD720100_INTC_PIN,
-					       TRIGGER_LEVEL,
-					       SIGNAL_THROUGH);
+					       IRQ_TRIGGER_LEVEL,
+					       IRQ_SIGNAL_THROUGH);
 			vr41xx_set_irq_level(UPD720100_INTC_PIN,
-					     LEVEL_LOW);
+					     IRQ_LEVEL_LOW);
 			irq = UPD720100_INTC_IRQ;
 			break;
 		default:
@@ -115,7 +115,7 @@ static void __pmac pmac_show_cpuinfo(struct seq_file *m)
 
 	/* find motherboard type */
 	seq_printf(m, "machine\t\t: ");
-	np = find_devices("device-tree");
+	np = of_find_node_by_path("/");
 	if (np != NULL) {
 		pp = (char *) get_property(np, "model", NULL);
 		if (pp != NULL)
@@ -133,6 +133,7 @@ static void __pmac pmac_show_cpuinfo(struct seq_file *m)
 			}
 			seq_printf(m, "\n");
 		}
+		of_node_put(np);
 	} else
 		seq_printf(m, "PowerMac\n");
 
@@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu)
 
 	/* Ensure completion of previous PIO writes. */
 	(void) pci_iommu_read(iommu->write_complete_reg);
-
-	/* Now update everyone's flush point. */
-	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
-		iommu->alloc_info[entry].flush =
-			iommu->alloc_info[entry].next;
-	}
 }
 
 #define IOPTE_CONSISTENT(CTX) \
@@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
 	iopte_val(*iopte) = val;
 }
 
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
+/* Based largely upon the ppc64 iommu allocator. */
+static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
 {
-	int i;
+	struct pci_iommu_arena *arena = &iommu->arena;
+	unsigned long n, i, start, end, limit;
+	int pass;
 
-	tsbsize /= sizeof(iopte_t);
+	limit = arena->limit;
+	start = arena->hint;
+	pass = 0;
 
-	for (i = 0; i < tsbsize; i++)
+again:
+	n = find_next_zero_bit(arena->map, limit, start);
+	end = n + npages;
+	if (unlikely(end >= limit)) {
+		if (likely(pass < 1)) {
+			limit = start;
+			start = 0;
+			__iommu_flushall(iommu);
+			pass++;
+			goto again;
+		} else {
+			/* Scanned the whole thing, give up. */
+			return -1;
+		}
+	}
+
+	for (i = n; i < end; i++) {
+		if (test_bit(i, arena->map)) {
+			start = i + 1;
+			goto again;
+		}
+	}
+
+	for (i = n; i < end; i++)
+		__set_bit(i, arena->map);
+
+	arena->hint = end;
+
+	return n;
+}
+
+static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
+{
+	unsigned long i;
+
+	for (i = base; i < (base + npages); i++)
+		__clear_bit(i, arena->map);
+}
+
+void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+{
+	unsigned long i, tsbbase, order, sz, num_tsb_entries;
+
+	num_tsb_entries = tsbsize / sizeof(iopte_t);
+
+	/* Setup initial software IOMMU state. */
+	spin_lock_init(&iommu->lock);
+	iommu->ctx_lowest_free = 1;
+	iommu->page_table_map_base = dma_offset;
+	iommu->dma_addr_mask = dma_addr_mask;
+
+	/* Allocate and initialize the free area map. */
+	sz = num_tsb_entries / 8;
+	sz = (sz + 7UL) & ~7UL;
+	iommu->arena.map = kmalloc(sz, GFP_KERNEL);
+	if (!iommu->arena.map) {
+		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+		prom_halt();
+	}
+	memset(iommu->arena.map, 0, sz);
+	iommu->arena.limit = num_tsb_entries;
+
+	/* Allocate and initialize the dummy page which we
+	 * set inactive IO PTEs to point to.
+	 */
+	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+	if (!iommu->dummy_page) {
+		prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
+		prom_halt();
+	}
+	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+	/* Now allocate and setup the IOMMU page table itself. */
+	order = get_order(tsbsize);
+	tsbbase = __get_free_pages(GFP_KERNEL, order);
+	if (!tsbbase) {
+		prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
+		prom_halt();
+	}
+	iommu->page_table = (iopte_t *)tsbbase;
+
+	for (i = 0; i < num_tsb_entries; i++)
 		iopte_make_dummy(iommu, &iommu->page_table[i]);
 }
 
-static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
 {
-	iopte_t *iopte, *limit, *first;
-	unsigned long cnum, ent, flush_point;
+	long entry;
 
-	cnum = 0;
-	while ((1UL << cnum) < npages)
-		cnum++;
-	iopte  = (iommu->page_table +
-		  (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
-	if (cnum == 0)
-		limit = (iommu->page_table +
-			 iommu->lowest_consistent_map);
-	else
-		limit = (iopte +
-			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
-	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
-	flush_point = iommu->alloc_info[cnum].flush;
-
-	first = iopte;
-	for (;;) {
-		if (IOPTE_IS_DUMMY(iommu, iopte)) {
-			if ((iopte + (1 << cnum)) >= limit)
-				ent = 0;
-			else
-				ent = ent + 1;
-			iommu->alloc_info[cnum].next = ent;
-			if (ent == flush_point)
-				__iommu_flushall(iommu);
-			break;
-		}
-		iopte += (1 << cnum);
-		ent++;
-		if (iopte >= limit) {
-			iopte = (iommu->page_table +
-				 (cnum <<
-				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-			ent = 0;
-		}
-		if (ent == flush_point)
-			__iommu_flushall(iommu);
-		if (iopte == first)
-			goto bad;
-	}
-
-	/* I've got your streaming cluster right here buddy boy... */
-	return iopte;
-
-bad:
-	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
-	       npages);
+	entry = pci_arena_alloc(iommu, npages);
+	if (unlikely(entry < 0))
 		return NULL;
 
+	return iommu->page_table + entry;
 }
 
-static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
-				   unsigned long npages, unsigned long ctx)
+static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
 {
-	unsigned long cnum, ent;
-
-	cnum = 0;
-	while ((1UL << cnum) < npages)
-		cnum++;
-
-	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
-		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
-
-	/* If the global flush might not have caught this entry,
-	 * adjust the flush point such that we will flush before
-	 * ever trying to reuse it.
-	 */
-#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
-	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
-		iommu->alloc_info[cnum].flush = ent;
-#undef between
-}
-
-/* We allocate consistent mappings from the end of cluster zero. */
-static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
-{
-	iopte_t *iopte;
-
-	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
-	while (iopte > iommu->page_table) {
-		iopte--;
-		if (IOPTE_IS_DUMMY(iommu, iopte)) {
-			unsigned long tmp = npages;
-
-			while (--tmp) {
-				iopte--;
-				if (!IOPTE_IS_DUMMY(iommu, iopte))
-					break;
-			}
-			if (tmp == 0) {
-				u32 entry = (iopte - iommu->page_table);
-
-				if (entry < iommu->lowest_consistent_map)
-					iommu->lowest_consistent_map = entry;
-				return iopte;
-			}
-		}
-	}
-	return NULL;
+	pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
 }
 
 static int iommu_alloc_ctx(struct pci_iommu *iommu)
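The pci_arena_alloc()/pci_arena_free() pair introduced above is a first-fit bitmap allocator with a rotating search hint. A minimal standalone sketch of that scheme, with illustrative names and a plain byte map instead of the kernel's bit helpers (not the kernel code itself):

	#include <stdlib.h>
	#include <string.h>

	struct arena {
		unsigned char *map;	/* one byte per entry, 0 = free */
		unsigned long  limit;	/* number of entries */
		unsigned long  hint;	/* where the next search starts */
	};

	/* Return the first index of a run of npages free entries, or -1. */
	static long arena_alloc(struct arena *a, unsigned long npages)
	{
		unsigned long i, run = 0;

		for (i = a->hint; i < a->limit; i++) {
			run = a->map[i] ? 0 : run + 1;
			if (run == npages) {
				unsigned long base = i + 1 - npages;
				memset(a->map + base, 1, npages);
				a->hint = i + 1;
				return (long)base;
			}
		}
		return -1;	/* caller may flush and retry from index 0 */
	}

	static void arena_free(struct arena *a, unsigned long base, unsigned long npages)
	{
		memset(a->map + base, 0, npages);
	}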
@@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	iopte_t *iopte;
-	unsigned long flags, order, first_page, ctx;
+	unsigned long flags, order, first_page;
 	void *ret;
 	int npages;
 
@@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	iommu = pcp->pbm->iommu;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
-	if (iopte == NULL) {
+	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
+
+	if (unlikely(iopte == NULL)) {
 		free_pages(first_page, order);
 		return NULL;
 	}
@@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	       ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = (void *) first_page;
 	npages = size >> IO_PAGE_SHIFT;
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
-		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
+		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
 				     IOPTE_WRITE |
 				     (first_page & IOPTE_PAGE));
 		iopte++;
 		first_page += IO_PAGE_SIZE;
 	}
 
-	{
-		int i;
-		u32 daddr = *dma_addrp;
-
-		npages = size >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++) {
-			pci_iommu_write(iommu->iommu_flush, daddr);
-			daddr += IO_PAGE_SIZE;
-		}
-	}
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return ret;
 }
 
@@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	iopte_t *iopte;
-	unsigned long flags, order, npages, i, ctx;
+	unsigned long flags, order, npages;
 
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	pcp = pdev->sysdata;
@@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	if ((iopte - iommu->page_table) ==
-	    iommu->lowest_consistent_map) {
-		iopte_t *walk = iopte + npages;
-		iopte_t *limit;
-
-		limit = (iommu->page_table +
-			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-		while (walk < limit) {
-			if (!IOPTE_IS_DUMMY(iommu, walk))
-				break;
-			walk++;
-		}
-		iommu->lowest_consistent_map =
-			(walk - iommu->page_table);
-	}
-
-	/* Data for consistent mappings cannot enter the streaming
-	 * buffers, so we only need to update the TSB.  We flush
-	 * the IOMMU here as well to prevent conflicts with the
-	 * streaming mapping deferred tlb flush scheme.
-	 */
-
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
-
-	for (i = 0; i < npages; i++, iopte++)
-		iopte_make_dummy(iommu, iopte);
-
-	if (iommu->iommu_ctxflush) {
-		pci_iommu_write(iommu->iommu_ctxflush, ctx);
-	} else {
-		for (i = 0; i < npages; i++) {
-			u32 daddr = dvma + (i << IO_PAGE_SHIFT);
-
-			pci_iommu_write(iommu->iommu_flush, daddr);
-		}
-	}
-
-	iommu_free_ctx(iommu, ctx);
+	free_npages(iommu, dvma, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	iommu = pcp->pbm->iommu;
 	strbuf = &pcp->pbm->stc;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad_no_ctx;
 
 	oaddr = (unsigned long)ptr;
 	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
 	npages >>= IO_PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu->lock, flags);
+	base = alloc_npages(iommu, npages);
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu_alloc_ctx(iommu);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	base = alloc_streaming_cluster(iommu, npages);
-	if (base == NULL)
+	if (unlikely(!base))
 		goto bad;
 
 	bus_addr = (iommu->page_table_map_base +
 		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
 	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else

@@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
 		iopte_val(*base) = iopte_protection | base_paddr;
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return ret;
 
 bad:
-	spin_unlock_irqrestore(&iommu->lock, flags);
+	iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+	if (printk_ratelimit())
+		WARN_ON(1);
 	return PCI_DMA_ERROR_CODE;
 }
 

@@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, ctx;
+	unsigned long flags, npages, ctx, i;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+		return;
+	}
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
+				 npages, direction);
 
-	/* Step 2: Clear out first TSB entry. */
-	iopte_make_dummy(iommu, base);
+	/* Step 2: Clear out TSB entries. */
+	for (i = 0; i < npages; i++)
+		iopte_make_dummy(iommu, base + i);
 
-	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
-			       npages, ctx);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
 
 	iommu_free_ctx(iommu, ctx);
 

@@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 			pci_map_single(pdev,
 				       (page_address(sglist->page) + sglist->offset),
 				       sglist->length, direction);
+		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+			return 0;
 		sglist->dma_length = sglist->length;
 		return 1;
 	}

@@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	iommu = pcp->pbm->iommu;
 	strbuf = &pcp->pbm->stc;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE))
+		goto bad_no_ctx;
 
 	/* Step 1: Prepare scatter list. */
 
 	npages = prepare_sg(sglist, nelems);
 
-	/* Step 2: Allocate a cluster. */
+	/* Step 2: Allocate a cluster and context, if necessary. */
 
 	spin_lock_irqsave(&iommu->lock, flags);
 
-	base = alloc_streaming_cluster(iommu, npages);
+	base = alloc_npages(iommu, npages);
+	ctx = 0;
+	if (iommu->iommu_ctxflush)
+		ctx = iommu_alloc_ctx(iommu);
+
+	spin_unlock_irqrestore(&iommu->lock, flags);
+
 	if (base == NULL)
 		goto bad;
-	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
+
+	dma_base = iommu->page_table_map_base +
+		((base - iommu->page_table) << IO_PAGE_SHIFT);
 
 	/* Step 3: Normalize DMA addresses. */
 	used = nelems;

@@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	}
 	used = nelems - used;
 
-	/* Step 4: Choose a context if necessary. */
-	ctx = 0;
-	if (iommu->iommu_ctxflush)
-		ctx = iommu_alloc_ctx(iommu);
-
-	/* Step 5: Create the mappings. */
+	/* Step 4: Create the mappings. */
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
 		iopte_protection = IOPTE_CONSISTENT(ctx);
 	if (direction != PCI_DMA_TODEVICE)
 		iopte_protection |= IOPTE_WRITE;
-	fill_sg (base, sglist, used, nelems, iopte_protection);
+
+	fill_sg(base, sglist, used, nelems, iopte_protection);
+
 #ifdef VERIFY_SG
 	verify_sglist(sglist, nelems, base, npages);
 #endif
 
-	spin_unlock_irqrestore(&iommu->lock, flags);
-
 	return used;
 
 bad:
-	spin_unlock_irqrestore(&iommu->lock, flags);
-	return PCI_DMA_ERROR_CODE;
+	iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+	if (printk_ratelimit())
+		WARN_ON(1);
+	return 0;
 }
 
 /* Unmap a set of streaming mode DMA translations. */

@@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	unsigned long flags, ctx, i, npages;
 	u32 bus_addr;
 
-	if (direction == PCI_DMA_NONE)
-		BUG();
+	if (unlikely(direction == PCI_DMA_NONE)) {
+		if (printk_ratelimit())
+			WARN_ON(1);
+	}
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;

@@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 		if (sglist[i].dma_length == 0)
 			break;
 	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+		  bus_addr) >> IO_PAGE_SHIFT;
 
 	base = iommu->page_table +
 		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

@@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	if (strbuf->strbuf_enabled)
 		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
-	/* Step 2: Clear out first TSB entry. */
-	iopte_make_dummy(iommu, base);
+	/* Step 2: Clear out the TSB entries. */
+	for (i = 0; i < npages; i++)
+		iopte_make_dummy(iommu, base + i);
 
-	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
-			       npages, ctx);
+	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
 
 	iommu_free_ctx(iommu, ctx);
 
@@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p)
 static void psycho_iommu_init(struct pci_controller_info *p)
 {
 	struct pci_iommu *iommu = p->pbm_A.iommu;
-	unsigned long tsbbase, i;
+	unsigned long i;
 	u64 control;
 
-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;

@@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
 
-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
-	 * table (128K ioptes * 8 bytes per iopte).  This is
-	 * page order 7 on UltraSparc.
-	 */
-	tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
-	if (!tsbbase) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
-	}
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_sz_bits = 17;
-	iommu->page_table_map_base = 0xc0000000;
-	iommu->dma_addr_mask = 0xffffffff;
-	pci_iommu_table_init(iommu, IO_TSB_SIZE);
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next  = 0;
-	}
-
-	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
+		     __pa(iommu->page_table));
 
 	control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
 	control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);

@@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
 	psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
 
 	/* If necessary, hook us up for starfire IRQ translations. */
-	if(this_is_starfire)
+	if (this_is_starfire)
 		p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
 	else
 		p->starfire_cookie = NULL;

@@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 			     u32 dma_mask)
 {
 	struct pci_iommu *iommu = p->pbm_A.iommu;
-	unsigned long tsbbase, i, order;
+	unsigned long i;
 	u64 control;
 
-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses. */
 	iommu->iommu_control  = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;

@@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
 
-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
-	if (!tsbbase) {
-		prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
-		prom_halt();
-	}
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_map_base = dvma_offset;
-	iommu->dma_addr_mask = dma_mask;
-	pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
-	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
+	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
+		    __pa(iommu->page_table));
 
 	control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
 	control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);

@@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 	switch(tsbsize) {
 	case 64:
 		control |= SABRE_IOMMU_TSBSZ_64K;
-		iommu->page_table_sz_bits = 16;
 		break;
 	case 128:
 		control |= SABRE_IOMMU_TSBSZ_128K;
-		iommu->page_table_sz_bits = 17;
 		break;
 	default:
 		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);

@@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p,
 		break;
 	}
 	sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next  = 0;
-	}
 }
 
 static void pbm_register_toplevel_resources(struct pci_controller_info *p,

@@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
 static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 {
 	struct pci_iommu *iommu = pbm->iommu;
-	unsigned long tsbbase, i, tagbase, database, order;
+	unsigned long i, tagbase, database;
 	u32 vdma[2], dma_mask;
 	u64 control;
 	int err, tsbsize;

@@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 		prom_halt();
 	};
 
-	/* Setup initial software IOMMU state. */
-	spin_lock_init(&iommu->lock);
-	iommu->ctx_lowest_free = 1;
-
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control  = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
 	iommu->iommu_tsbbase  = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;

@@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 	/* Leave diag mode enabled for full-flushing done
 	 * in pci_iommu.c
 	 */
+	pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
 
-	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
-	if (!iommu->dummy_page) {
-		prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
-		prom_halt();
-	}
-	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
-	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
-	/* Using assumed page size 8K with 128K entries we need 1MB iommu page
-	 * table (128K ioptes * 8 bytes per iopte).  This is
-	 * page order 7 on UltraSparc.
-	 */
-	order = get_order(tsbsize * 8 * 1024);
-	tsbbase = __get_free_pages(GFP_KERNEL, order);
-	if (!tsbbase) {
-		prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
-		prom_halt();
-	}
-
-	iommu->page_table = (iopte_t *)tsbbase;
-	iommu->page_table_map_base = vdma[0];
-	iommu->dma_addr_mask = dma_mask;
-	pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
-	switch (tsbsize) {
-	case 64:
-		iommu->page_table_sz_bits = 16;
-		break;
-
-	case 128:
-		iommu->page_table_sz_bits = 17;
-		break;
-
-	default:
-		prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
-		prom_halt();
-		break;
-	};
-
-	/* We start with no consistent mappings. */
-	iommu->lowest_consistent_map =
-		1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
-	for (i = 0; i < PBM_NCLUSTERS; i++) {
-		iommu->alloc_info[i].flush = 0;
-		iommu->alloc_info[i].next  = 0;
-	}
-
-	schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+	schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
 
 	control = schizo_read(iommu->iommu_control);
 	control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
@@ -1001,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	preempt_enable();
 }
 
-extern unsigned long xcall_promstop;
-
-void smp_promstop_others(void)
-{
-	smp_cross_call(&xcall_promstop, 0, 0, 0);
-}
-
 #define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
 #define prof_counter(__cpu)		cpu_data(__cpu).counter
 

@@ -453,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
 	 nop
 	nop
 
-	.globl		xcall_promstop
-xcall_promstop:
-	rdpr	%pstate, %g2
-	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
-	rdpr	%pil, %g2
-	wrpr	%g0, 15, %pil
-	sethi	%hi(109f), %g7
-	b,pt	%xcc, etrap_irq
-109:	 or	%g7, %lo(109b), %g7
-	flushw
-	call	prom_stopself
-	 nop
-	/* We should not return, just spin if we do... */
-1:	b,a,pt	%xcc, 1b
-	 nop
-
 	.data
 
 errata32_hwbug:

@@ -68,19 +68,11 @@ void prom_cmdline(void)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_SMP
-extern void smp_promstop_others(void);
-#endif
-
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
 void prom_halt(void)
 {
-#ifdef CONFIG_SMP
-	smp_promstop_others();
-	udelay(8000);
-#endif
 again:
 	p1275_cmd("exit", P1275_INOUT(0, 0));
 	goto again; /* PROM is out to get me -DaveM */

@@ -88,10 +80,6 @@ void prom_halt(void)
 
 void prom_halt_power_off(void)
 {
-#ifdef CONFIG_SMP
-	smp_promstop_others();
-	udelay(8000);
-#endif
 	p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
 
 	/* if nothing else helps, we just halt */

@@ -58,9 +58,8 @@ acpi_system_read_event(struct file *file, char __user * buffer, size_t count,
 		return_VALUE(-EAGAIN);
 
 	result = acpi_bus_receive_event(&event);
-	if (result) {
-		return_VALUE(-EIO);
-	}
+	if (result)
+		return_VALUE(result);
 
 	chars_remaining = sprintf(str, "%s %s %08x %08x\n",
 				  event.device_class ? event.

@@ -830,6 +830,9 @@ static int __init mbcs_init(void)
 {
 	int rv;
 
+	if (!ia64_platform_is("sn2"))
+		return -ENODEV;
+
 	// Put driver into chrdevs[].  Get major number.
 	rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
 	if (rv < 0) {

@@ -695,7 +695,7 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c)
          {
            TRACE_PE("IDLE - got STX but no space in rx_queue!");
            pInfo->state=R3964_WAIT_FOR_RX_BUF;
-           mod_timer(&pInfo->tmr, R3964_TO_NO_BUF);
+           mod_timer(&pInfo->tmr, jiffies + R3964_TO_NO_BUF);
            break;
          }
 start_receiving:

@@ -705,7 +705,7 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c)
          pInfo->last_rx = 0;
          pInfo->flags &= ~R3964_ERROR;
          pInfo->state=R3964_RECEIVING;
-         mod_timer(&pInfo->tmr, R3964_TO_ZVZ);
+         mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
          pInfo->nRetry = 0;
          put_char(pInfo, DLE);
          flush(pInfo);

@@ -732,7 +732,7 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c)
            if(pInfo->flags & R3964_BCC)
            {
               pInfo->state = R3964_WAIT_FOR_BCC;
-              mod_timer(&pInfo->tmr, R3964_TO_ZVZ);
+              mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
            }
            else
            {

@@ -744,7 +744,7 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c)
            pInfo->last_rx = c;
 char_to_buf:
            pInfo->rx_buf[pInfo->rx_position++] = c;
-           mod_timer(&pInfo->tmr, R3964_TO_ZVZ);
+           mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ);
         }
      }
      /* else: overflow-msg? BUF_SIZE>MTU; should not happen? */
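The n_r3964 hunks above all correct the same mistake: mod_timer() takes an absolute expiry time in jiffies, not a relative timeout. A minimal sketch of the corrected pattern, assuming the 2.6-era timer API; "my_timer" and "arm_timeout" are illustrative names, not part of this commit:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list my_timer;	/* assumed to be set up elsewhere */

	static void arm_timeout(unsigned long delay_jiffies)
	{
		/* Wrong: mod_timer(&my_timer, delay_jiffies);
		 * that treats a delay as a deadline near time zero. */
		mod_timer(&my_timer, jiffies + delay_jiffies);	/* absolute expiry */
	}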
|
|
@ -93,7 +93,7 @@ config KEYBOARD_LKKBD
|
||||||
|
|
||||||
config KEYBOARD_LOCOMO
|
config KEYBOARD_LOCOMO
|
||||||
tristate "LoCoMo Keyboard Support"
|
tristate "LoCoMo Keyboard Support"
|
||||||
depends on SHARP_LOCOMO
|
depends on SHARP_LOCOMO && INPUT_KEYBOARD
|
||||||
help
|
help
|
||||||
Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA
|
Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA
|
||||||
|
|
||||||
|
|
|
@ -53,7 +53,7 @@ static unsigned char spitzkbd_keycode[NR_SCANCODES] = {
KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */
0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */
KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */
-SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, /* 49-64 */
+SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */
SPITZ_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */
SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */
KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */

@ -90,11 +90,11 @@ static inline int uinput_request_reserve_slot(struct uinput_device *udev, struct

static void uinput_request_done(struct uinput_device *udev, struct uinput_request *request)
{
-complete(&request->done);
-
/* Mark slot as available */
udev->requests[request->id] = NULL;
wake_up_interruptible(&udev->requests_waitq);
+
+complete(&request->done);
}

static int uinput_request_submit(struct input_dev *dev, struct uinput_request *request)

@ -543,7 +543,7 @@ static int cadet_probe(void)

for(i=0;i<8;i++) {
io=iovals[i];
-if(request_region(io,2, "cadet-probe")>=0) {
+if (request_region(io, 2, "cadet-probe")) {
cadet_setfreq(1410);
if(cadet_getfreq()==1410) {
release_region(io, 2);

@ -203,7 +203,7 @@ static const unsigned short init_ntsc[] = {
0x8c, 640, /* Horizontal length */
0x8d, 640, /* Number of pixels */
0x8f, 0xc00, /* Disable window 2 */
-0xf0, 0x173, /* 13.5 MHz transport, Forced
+0xf0, 0x73, /* 13.5 MHz transport, Forced
* mode, latch windows */
0xf2, 0x13, /* NTSC M, composite input */
0xe7, 0x1e1, /* Enable vertical standard

@ -212,38 +212,36 @@ static const unsigned short init_ntsc[] = {

static const unsigned short init_pal[] = {
0x88, 23, /* Window 1 vertical begin */
-0x89, 288 + 16, /* Vertical lines in (16 lines
+0x89, 288, /* Vertical lines in (16 lines
* skipped by the VFE) */
-0x8a, 288 + 16, /* Vertical lines out (16 lines
+0x8a, 288, /* Vertical lines out (16 lines
* skipped by the VFE) */
0x8b, 16, /* Horizontal begin */
0x8c, 768, /* Horizontal length */
0x8d, 784, /* Number of pixels
* Must be >= Horizontal begin + Horizontal length */
0x8f, 0xc00, /* Disable window 2 */
-0xf0, 0x177, /* 13.5 MHz transport, Forced
+0xf0, 0x77, /* 13.5 MHz transport, Forced
* mode, latch windows */
0xf2, 0x3d1, /* PAL B,G,H,I, composite input */
-0xe7, 0x261, /* PAL/SECAM set to 288 + 16 lines
-* change to 0x241 for 288 lines */
+0xe7, 0x241, /* PAL/SECAM set to 288 lines */
};

static const unsigned short init_secam[] = {
-0x88, 23 - 16, /* Window 1 vertical begin */
+0x88, 23, /* Window 1 vertical begin */
-0x89, 288 + 16, /* Vertical lines in (16 lines
+0x89, 288, /* Vertical lines in (16 lines
* skipped by the VFE) */
-0x8a, 288 + 16, /* Vertical lines out (16 lines
+0x8a, 288, /* Vertical lines out (16 lines
* skipped by the VFE) */
0x8b, 16, /* Horizontal begin */
0x8c, 768, /* Horizontal length */
0x8d, 784, /* Number of pixels
* Must be >= Horizontal begin + Horizontal length */
0x8f, 0xc00, /* Disable window 2 */
-0xf0, 0x177, /* 13.5 MHz transport, Forced
+0xf0, 0x77, /* 13.5 MHz transport, Forced
* mode, latch windows */
0xf2, 0x3d5, /* SECAM, composite input */
-0xe7, 0x261, /* PAL/SECAM set to 288 + 16 lines
-* change to 0x241 for 288 lines */
+0xe7, 0x241, /* PAL/SECAM set to 288 lines */
};

static const unsigned char init_common[] = {

@ -410,6 +408,12 @@ vpx3220_command (struct i2c_client *client,
case DECODER_SET_NORM:
{
int *iarg = arg, data;
+int temp_input;

+/* Here we back up the input selection because it gets
+overwritten when we fill the registers with the
+choosen video norm */
+temp_input = vpx3220_fp_read(client, 0xf2);
+
dprintk(1, KERN_DEBUG "%s: DECODER_SET_NORM %d\n",
I2C_NAME(client), *iarg);

@ -449,6 +453,10 @@ vpx3220_command (struct i2c_client *client,

}
decoder->norm = *iarg;
+
+/* And here we set the backed up video input again */
+vpx3220_fp_write(client, 0xf2, temp_input | 0x0010);
+udelay(10);
}
break;

@ -243,7 +243,7 @@ config IPW_DEBUG

config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
-depends on NET_RADIO && ISA && (PCI || BROKEN)
+depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN)
---help---
This is the standard Linux driver to support Cisco/Aironet ISA and
PCI 802.11 wireless cards.

@ -1233,7 +1233,7 @@ static void __init quirk_alder_ioapic(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic );
#endif

-#ifdef CONFIG_SCSI_SATA
+#ifdef CONFIG_SCSI_SATA_INTEL_COMBINED
static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
{
u8 prog, comb, tmp;

@ -1310,7 +1310,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
request_region(0x170, 8, "libata"); /* port 1 */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_combined );
-#endif /* CONFIG_SCSI_SATA */
+#endif /* CONFIG_SCSI_SATA_INTEL_COMBINED */

int pcie_mch_quirk;

@ -66,7 +66,7 @@ void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func,
if (pc_debug > lvl) {
printk(KERN_DEBUG "skt%u: %s: ", skt->nr, func);
va_start(args, fmt);
-printk(fmt, args);
+vprintk(fmt, args);
va_end(args);
}
}

@ -321,8 +321,6 @@ soc_common_pcmcia_get_socket(struct pcmcia_socket *sock, socket_state_t *state)
* less punt all of this work and let the kernel handle the details
* of power configuration, reset, &c. We also record the value of
* `state' in order to regurgitate it to the PCMCIA core later.
-*
-* Returns: 0
*/
static int
soc_common_pcmcia_set_socket(struct pcmcia_socket *sock, socket_state_t *state)

@ -407,7 +405,7 @@ soc_common_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *m
* the map speed as requested, but override the address ranges
* supplied by Card Services.
*
-* Returns: 0 on success, -1 on error
+* Returns: 0 on success, -ERRNO on error
*/
static int
soc_common_pcmcia_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *map)

@ -655,8 +653,8 @@ static void soc_pcmcia_cpufreq_unregister(void)
}

#else
-#define soc_pcmcia_cpufreq_register()
+static int soc_pcmcia_cpufreq_register(void) { return 0; }
-#define soc_pcmcia_cpufreq_unregister()
+static void soc_pcmcia_cpufreq_unregister(void) {}
#endif

int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr)

@ -738,7 +736,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops
goto out_err_5;
}

-if ( list_empty(&soc_pcmcia_sockets) )
+if (list_empty(&soc_pcmcia_sockets))
soc_pcmcia_cpufreq_register();

list_add(&skt->node, &soc_pcmcia_sockets);

@ -839,7 +837,7 @@ int soc_common_drv_pcmcia_remove(struct device *dev)
release_resource(&skt->res_io);
release_resource(&skt->res_skt);
}
-if ( list_empty(&soc_pcmcia_sockets) )
+if (list_empty(&soc_pcmcia_sockets))
soc_pcmcia_cpufreq_unregister();

up(&soc_pcmcia_sockets_lock);

@ -569,6 +569,11 @@ config SCSI_SATA_VITESSE

If unsure, say N.

+config SCSI_SATA_INTEL_COMBINED
+bool
+depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX)
+default y
+
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API

@ -453,9 +453,9 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
/*
* We can exit If all the commands are complete
*/
+spin_unlock_irq(host->host_lock);
if (active == 0)
return SUCCESS;
-spin_unlock_irq(host->host_lock);
ssleep(1);
spin_lock_irq(host->host_lock);
}

@ -1119,6 +1119,36 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}

+static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out)
+{
+unsigned char *buf;
+unsigned int buflen;
+
+if (cmd->use_sg) {
+struct scatterlist *sg;
+
+sg = (struct scatterlist *) cmd->request_buffer;
+buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
+buflen = sg->length;
+} else {
+buf = cmd->request_buffer;
+buflen = cmd->request_bufflen;
+}
+
+*buf_out = buf;
+return buflen;
+}
+
+static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf)
+{
+if (cmd->use_sg) {
+struct scatterlist *sg;
+
+sg = (struct scatterlist *) cmd->request_buffer;
+kunmap_atomic(buf - sg->offset, KM_IRQ0);
+}
+}
+
/*
* Until we scan the entire bus with inquiries, go throught this fella...
*/

@ -1145,11 +1175,9 @@ static void ourdone(struct scsi_cmnd *Cmnd)
int ok = host_byte(Cmnd->result) == DID_OK;
if (Cmnd->cmnd[0] == 0x12 && ok) {
unsigned char *iqd;
+unsigned int iqd_len;

-if (Cmnd->use_sg != 0)
-BUG();
-
-iqd = ((unsigned char *)Cmnd->buffer);
+iqd_len = scsi_rbuf_get(Cmnd, &iqd);

/* tags handled in midlayer */
/* enable sync mode? */

@ -1163,6 +1191,9 @@ static void ourdone(struct scsi_cmnd *Cmnd)
if (iqd[7] & 0x20) {
qpti->dev_param[tgt].device_flags |= 0x20;
}
+
+scsi_rbuf_put(Cmnd, iqd);
+
qpti->sbits |= (1 << tgt);
} else if (!ok) {
qpti->sbits |= (1 << tgt);

@ -272,6 +272,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
{ "SUP1421", 0 },
/* SupraExpress 33.6 Data/Fax PnP modem */
{ "SUP1590", 0 },
+/* SupraExpress 336i Sp ASVD */
+{ "SUP1620", 0 },
/* SupraExpress 33.6 Data/Fax PnP modem */
{ "SUP1760", 0 },
/* Phoebe Micro */

@ -967,7 +967,7 @@ static int sci_startup(struct uart_port *port)
#endif

sci_request_irq(s);
-sci_start_tx(port, 1);
+sci_start_tx(port);
sci_start_rx(port, 1);

return 0;

@ -326,7 +326,8 @@ static void postproc_atl_queue(struct isp116x *isp116x)
usb_settoggle(udev, ep->epnum,
ep->nextpid ==
USB_PID_OUT,
-PTD_GET_TOGGLE(ptd) ^ 1);
+PTD_GET_TOGGLE(ptd));
+urb->actual_length += PTD_GET_COUNT(ptd);
urb->status = cc_to_error[TD_DATAUNDERRUN];
spin_unlock(&urb->lock);
continue;

@ -1702,10 +1702,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
if ((endpoint->bmAttributes & 3) != 3) /* Not an interrupt endpoint */
continue;

-/* handle potential highspeed HID correctly */
interval = endpoint->bInterval;
-if (dev->speed == USB_SPEED_HIGH)
-interval = 1 << (interval - 1);

/* Change the polling interval of mice. */
if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0)

@ -223,7 +223,7 @@ int usb_serial_generic_write_room (struct usb_serial_port *port)
dbg("%s - port %d", __FUNCTION__, port->number);

if (serial->num_bulk_out) {
-if (port->write_urb_busy)
+if (!(port->write_urb_busy))
room = port->bulk_out_size;
}

@ -565,6 +565,10 @@ static int vgacon_switch(struct vc_data *c)
scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
c->vc_screenbuf_size > vga_vram_size ?
vga_vram_size : c->vc_screenbuf_size);
+if (!(vga_video_num_columns % 2) &&
+vga_video_num_columns <= ORIG_VIDEO_COLS &&
+vga_video_num_lines <= (ORIG_VIDEO_LINES *
+vga_default_font_height) / c->vc_font.height)
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}

@ -1023,7 +1027,8 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
if (width % 2 || width > ORIG_VIDEO_COLS ||
height > (ORIG_VIDEO_LINES * vga_default_font_height)/
c->vc_font.height)
-return -EINVAL;
+/* let svgatextmode tinker with video timings */
+return 0;

if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */
vgacon_doresize(c, width, height);

@ -592,6 +592,7 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return ret;
}

+#ifdef CONFIG_CPU_FREQ
/*
* sa1100fb_display_dma_period()
* Calculate the minimum period (in picoseconds) between two DMA

@ -606,6 +607,7 @@ static inline unsigned int sa1100fb_display_dma_period(struct fb_var_screeninfo
*/
return var->pixclock * 8 * 16 / var->bits_per_pixel;
}
+#endif

/*
* sa1100fb_check_var():

@ -77,8 +77,7 @@ static void w1_master_release(struct device *dev)

dev_dbg(dev, "%s: Releasing %s.\n", __func__, md->name);

-if (md->nls && md->nls->sk_socket)
-sock_release(md->nls->sk_socket);
+dev_fini_netlink(md);
memset(md, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master));
kfree(md);
}

26 fs/aio.c
@ -398,7 +398,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
if (unlikely(!req))
return NULL;

-req->ki_flags = 1 << KIF_LOCKED;
+req->ki_flags = 0;
req->ki_users = 2;
req->ki_key = 0;
req->ki_ctx = ctx;

@ -547,25 +547,6 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id)
return ioctx;
}

-static int lock_kiocb_action(void *param)
-{
-schedule();
-return 0;
-}
-
-static inline void lock_kiocb(struct kiocb *iocb)
-{
-wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action,
-TASK_UNINTERRUPTIBLE);
-}
-
-static inline void unlock_kiocb(struct kiocb *iocb)
-{
-kiocbClearLocked(iocb);
-smp_mb__after_clear_bit();
-wake_up_bit(&iocb->ki_flags, KIF_LOCKED);
-}
-
/*
* use_mm
* Makes the calling kernel thread take on the specified

@ -796,9 +777,7 @@ static int __aio_run_iocbs(struct kioctx *ctx)
* Hold an extra reference while retrying i/o.
*/
iocb->ki_users++; /* grab extra reference */
-lock_kiocb(iocb);
aio_run_iocb(iocb);
-unlock_kiocb(iocb);
if (__aio_put_req(ctx, iocb)) /* drop extra ref */
put_ioctx(ctx);
}

@ -1542,7 +1521,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,

spin_lock_irq(&ctx->ctx_lock);
aio_run_iocb(req);
-unlock_kiocb(req);
if (!list_empty(&ctx->run_list)) {
/* drain the run list */
while (__aio_run_iocbs(ctx))

@ -1674,7 +1652,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
if (NULL != cancel) {
struct io_event tmp;
pr_debug("calling cancel\n");
-lock_kiocb(kiocb);
memset(&tmp, 0, sizeof(tmp));
tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
tmp.data = kiocb->ki_user_data;

@ -1686,7 +1663,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
if (copy_to_user(result, &tmp, sizeof(tmp)))
ret = -EFAULT;
}
-unlock_kiocb(kiocb);
} else
ret = -EINVAL;

@ -85,6 +85,10 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
struct nfs_delegation *delegation;
int status = 0;

+/* Ensure we first revalidate the attributes and page cache! */
+if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
+__nfs_revalidate_inode(NFS_SERVER(inode), inode);
+
delegation = nfs_alloc_delegation();
if (delegation == NULL)
return -ENOMEM;

@ -137,7 +137,8 @@ static int nfs_revalidate_file(struct inode *inode, struct file *filp)
struct nfs_inode *nfsi = NFS_I(inode);
int retval = 0;

-if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode))
+if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR))
+|| nfs_attribute_timeout(inode))
retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
nfs_revalidate_mapping(inode, filp->f_mapping);
return 0;

@ -877,12 +877,10 @@ static int nfs_wait_on_inode(struct inode *inode)
sigset_t oldmask;
int error;

-atomic_inc(&inode->i_count);
rpc_clnt_sigmask(clnt, &oldmask);
error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
nfs_wait_schedule, TASK_INTERRUPTIBLE);
rpc_clnt_sigunmask(clnt, &oldmask);
-iput(inode);

return error;
}

@ -1226,10 +1224,6 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
loff_t cur_size, new_isize;
int data_unstable;

-/* Do we hold a delegation? */
-if (nfs_have_delegation(inode, FMODE_READ))
-return 0;
-
spin_lock(&inode->i_lock);

/* Are we in the process of updating data on the server? */

@ -1350,7 +1344,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign
nfsi->read_cache_jiffies = fattr->timestamp;

/* Are we racing with known updates of the metadata on the server? */
-data_unstable = ! nfs_verify_change_attribute(inode, verifier);
+data_unstable = ! (nfs_verify_change_attribute(inode, verifier) ||
+(nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE));

/* Check if our cached file size is stale */
new_isize = nfs_size_to_loff_t(fattr->size);

@ -103,7 +103,9 @@ enum pid_directory_inos {
PROC_TGID_NUMA_MAPS,
PROC_TGID_MOUNTS,
PROC_TGID_WCHAN,
+#ifdef CONFIG_MMU
PROC_TGID_SMAPS,
+#endif
#ifdef CONFIG_SCHEDSTATS
PROC_TGID_SCHEDSTAT,
#endif

@ -141,7 +143,9 @@ enum pid_directory_inos {
PROC_TID_NUMA_MAPS,
PROC_TID_MOUNTS,
PROC_TID_WCHAN,
+#ifdef CONFIG_MMU
PROC_TID_SMAPS,
+#endif
#ifdef CONFIG_SCHEDSTATS
PROC_TID_SCHEDSTAT,
#endif

@ -195,7 +199,9 @@ static struct pid_entry tgid_base_stuff[] = {
E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO),
E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO),
E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
+#ifdef CONFIG_MMU
E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUGO),
+#endif
#ifdef CONFIG_SECURITY
E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
#endif

@ -235,7 +241,9 @@ static struct pid_entry tid_base_stuff[] = {
E(PROC_TID_ROOT, "root", S_IFLNK|S_IRWXUGO),
E(PROC_TID_EXE, "exe", S_IFLNK|S_IRWXUGO),
E(PROC_TID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
+#ifdef CONFIG_MMU
E(PROC_TID_SMAPS, "smaps", S_IFREG|S_IRUGO),
+#endif
#ifdef CONFIG_SECURITY
E(PROC_TID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
#endif

@ -630,6 +638,7 @@ static struct file_operations proc_numa_maps_operations = {
};
#endif

+#ifdef CONFIG_MMU
extern struct seq_operations proc_pid_smaps_op;
static int smaps_open(struct inode *inode, struct file *file)
{

@ -648,6 +657,7 @@ static struct file_operations proc_smaps_operations = {
.llseek = seq_lseek,
.release = seq_release,
};
+#endif

extern struct seq_operations mounts_op;
static int mounts_open(struct inode *inode, struct file *file)

@ -1681,10 +1691,12 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
case PROC_TGID_MOUNTS:
inode->i_fop = &proc_mounts_operations;
break;
+#ifdef CONFIG_MMU
case PROC_TID_SMAPS:
case PROC_TGID_SMAPS:
inode->i_fop = &proc_smaps_operations;
break;
+#endif
#ifdef CONFIG_SECURITY
case PROC_TID_ATTR:
inode->i_nlink = 2;

@ -91,6 +91,7 @@ static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
next = _rb;
break;
}
+pos--;
}

return next;

@ -66,4 +66,5 @@ struct pxafb_mach_info {

};
void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info);
+void set_pxa_fb_parent(struct device *parent_dev);
unsigned long pxafb_get_hsync_time(struct device *dev);

@ -9,7 +9,7 @@
* 06-Dec-1997 RMK Created.
* 02-Sep-2003 BJD Modified for S3C2410
* 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
-*
+* 13-Oct-2005 BJD Fixed problems with LDRH/STRH offset range
*/

#ifndef __ASM_ARM_ARCH_IO_H

@ -117,11 +117,23 @@ DECLARE_IO(int,l,"")
#define __outwc(value,port) \
({ \
unsigned long v = value; \
-if (__PORT_PCIO((port))) \
+if (__PORT_PCIO((port))) { \
+if ((port) < 256 && (port) > -256) \
__asm__ __volatile__( \
"strh %0, [%1, %2] @ outwc" \
: : "r" (v), "r" (PCIO_BASE), "Jr" ((port))); \
+else if ((port) > 0) \
+__asm__ __volatile__( \
+"strh %0, [%1, %2] @ outwc" \
+: : "r" (v), \
+"r" (PCIO_BASE + ((port) & ~0xff)), \
+"Jr" (((port) & 0xff))); \
else \
+__asm__ __volatile__( \
+"strh %0, [%1, #0] @ outwc" \
+: : "r" (v), \
+"r" (PCIO_BASE + (port))); \
+} else \
__asm__ __volatile__( \
"strh %0, [%1, #0] @ outwc" \
: : "r" (v), "r" ((port))); \

@ -130,11 +142,25 @@ DECLARE_IO(int,l,"")
#define __inwc(port) \
({ \
unsigned short result; \
-if (__PORT_PCIO((port))) \
+if (__PORT_PCIO((port))) { \
+if ((port) < 256 && (port) > -256 ) \
__asm__ __volatile__( \
"ldrh %0, [%1, %2] @ inwc" \
-: "=r" (result) : "r" (PCIO_BASE), "Jr" ((port))); \
+: "=r" (result) \
+: "r" (PCIO_BASE), \
+"Jr" ((port))); \
+else if ((port) > 0) \
+__asm__ __volatile__( \
+"ldrh %0, [%1, %2] @ inwc" \
+: "=r" (result) \
+: "r" (PCIO_BASE + ((port) & ~0xff)), \
+"Jr" (((port) & 0xff))); \
else \
+__asm__ __volatile__( \
+"ldrh %0, [%1, #0] @ inwc" \
+: "=r" (result) \
+: "r" (PCIO_BASE + ((port)))); \
+} else \
__asm__ __volatile__( \
"ldrh %0, [%1, #0] @ inwc" \
: "=r" (result) : "r" ((port))); \

@ -27,23 +27,27 @@
* PCI bus.
*/

-#define PBM_LOGCLUSTERS 3
-#define PBM_NCLUSTERS (1 << PBM_LOGCLUSTERS)
-
struct pci_controller_info;

/* This contains the software state necessary to drive a PCI
* controller's IOMMU.
*/
+struct pci_iommu_arena {
+unsigned long *map;
+unsigned int hint;
+unsigned int limit;
+};
+
struct pci_iommu {
/* This protects the controller's IOMMU and all
* streaming buffers underneath.
*/
spinlock_t lock;

+struct pci_iommu_arena arena;
+
/* IOMMU page table, a linear array of ioptes. */
iopte_t *page_table; /* The page table itself. */
-int page_table_sz_bits; /* log2 of ow many pages does it map? */

/* Base PCI memory space address where IOMMU mappings
* begin.

@ -62,12 +66,6 @@ struct pci_iommu {
*/
unsigned long write_complete_reg;

-/* The lowest used consistent mapping entry. Since
-* we allocate consistent maps out of cluster 0 this
-* is relative to the beginning of closter 0.
-*/
-u32 lowest_consistent_map;
-
/* In order to deal with some buggy third-party PCI bridges that
* do wrong prefetching, we never mark valid mappings as invalid.
* Instead we point them at this dummy page.

@ -75,16 +73,6 @@ struct pci_iommu {
unsigned long dummy_page;
unsigned long dummy_page_pa;

-/* If PBM_NCLUSTERS is ever decreased to 4 or lower,
-* or if largest supported page_table_sz * 8K goes above
-* 2GB, you must increase the size of the type of
-* these counters. You have been duly warned. -DaveM
-*/
-struct {
-u16 next;
-u16 flush;
-} alloc_info[PBM_NCLUSTERS];
-
/* CTX allocation. */
unsigned long ctx_lowest_free;
unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];

@ -102,7 +90,7 @@ struct pci_iommu {
u32 dma_addr_mask;
};

-extern void pci_iommu_table_init(struct pci_iommu *, int);
+extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);

/* This describes a PCI bus module's streaming buffer. */
struct pci_strbuf {

@ -162,13 +162,13 @@ typedef struct acct acct_t;
#ifdef __KERNEL__
/*
* Yet another set of HZ to *HZ helper functions.
-* See <linux/times.h> for the original.
+* See <linux/jiffies.h> for the original.
*/

static inline u32 jiffies_to_AHZ(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0
-return x / (HZ / USER_HZ);
+return x / (HZ / AHZ);
#else
u64 tmp = (u64)x * TICK_NSEC;
do_div(tmp, (NSEC_PER_SEC / AHZ));

@ -24,7 +24,12 @@ struct kioctx;
#define KIOCB_SYNC_KEY (~0U)

/* ki_flags bits */
-#define KIF_LOCKED 0
+/*
+* This may be used for cancel/retry serialization in the future, but
+* for now it's unused and we probably don't want modules to even
+* think they can use it.
+*/
+/* #define KIF_LOCKED 0 */
#define KIF_KICKED 1
#define KIF_CANCELLED 2

@ -393,15 +393,13 @@ extern cpumask_t cpu_present_map;
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)

/* Find the highest possible smp_processor_id() */
-static inline unsigned int highest_possible_processor_id(void)
-{
-unsigned int cpu, highest = 0;
-
-for_each_cpu_mask(cpu, cpu_possible_map)
-highest = cpu;
-return highest;
-}
+#define highest_possible_processor_id() \
+({ \
+unsigned int cpu, highest = 0; \
+for_each_cpu_mask(cpu, cpu_possible_map) \
+highest = cpu; \
+highest; \
+})

#endif /* __LINUX_CPUMASK_H */

@ -442,12 +442,14 @@ static inline void list_splice_init(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_rcu(pos, head) \
-for (pos = (head)->next; prefetch(pos->next), pos != (head); \
-pos = rcu_dereference(pos->next))
+for (pos = (head)->next; \
+prefetch(rcu_dereference(pos)->next), pos != (head); \
+pos = pos->next)

#define __list_for_each_rcu(pos, head) \
-for (pos = (head)->next; pos != (head); \
-pos = rcu_dereference(pos->next))
+for (pos = (head)->next; \
+rcu_dereference(pos) != (head); \
+pos = pos->next)

/**
* list_for_each_safe_rcu - iterate over an rcu-protected list safe

@ -461,8 +463,9 @@ static inline void list_splice_init(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_safe_rcu(pos, n, head) \
-for (pos = (head)->next, n = pos->next; pos != (head); \
-pos = rcu_dereference(n), n = pos->next)
+for (pos = (head)->next; \
+n = rcu_dereference(pos)->next, pos != (head); \
+pos = n)

/**
* list_for_each_entry_rcu - iterate over rcu list of given type

@ -476,9 +479,9 @@ static inline void list_splice_init(struct list_head *list,
*/
#define list_for_each_entry_rcu(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
-prefetch(pos->member.next), &pos->member != (head); \
-pos = rcu_dereference(list_entry(pos->member.next, \
-typeof(*pos), member)))
+prefetch(rcu_dereference(pos)->member.next), \
+&pos->member != (head); \
+pos = list_entry(pos->member.next, typeof(*pos), member))

/**

@ -492,8 +495,9 @@ static inline void list_splice_init(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_continue_rcu(pos, head) \
-for ((pos) = (pos)->next; prefetch((pos)->next), (pos) != (head); \
-(pos) = rcu_dereference((pos)->next))
+for ((pos) = (pos)->next; \
+prefetch(rcu_dereference((pos))->next), (pos) != (head); \
+(pos) = (pos)->next)

/*
* Double linked lists with a single pointer list head.

@ -696,8 +700,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
pos = n)

#define hlist_for_each_rcu(pos, head) \
-for ((pos) = (head)->first; pos && ({ prefetch((pos)->next); 1; }); \
-(pos) = rcu_dereference((pos)->next))
+for ((pos) = (head)->first; \
+rcu_dereference((pos)) && ({ prefetch((pos)->next); 1; }); \
+(pos) = (pos)->next)

/**
* hlist_for_each_entry - iterate over list of given type

@ -762,9 +767,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
*/
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
for (pos = (head)->first; \
-pos && ({ prefetch(pos->next); 1;}) && \
+rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
-pos = rcu_dereference(pos->next))
+pos = pos->next)

#else
#warning "don't include kernel headers in userspace"

@ -94,6 +94,7 @@ struct rcu_data {
long batch; /* Batch # for current RCU batch */
struct rcu_head *nxtlist;
struct rcu_head **nxttail;
+long count; /* # of queued items */
struct rcu_head *curlist;
struct rcu_head **curtail;
struct rcu_head *donelist;

@ -424,6 +424,7 @@ static void cleanup_timers(struct list_head *head,
cputime_t ptime = cputime_add(utime, stime);

list_for_each_entry_safe(timer, next, head, entry) {
+put_task_struct(timer->task);
timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, ptime)) {

@ -436,6 +437,7 @@ static void cleanup_timers(struct list_head *head,

++head;
list_for_each_entry_safe(timer, next, head, entry) {
+put_task_struct(timer->task);
timer->task = NULL;
list_del_init(&timer->entry);
if (cputime_lt(timer->expires.cpu, utime)) {

@ -448,6 +450,7 @@ static void cleanup_timers(struct list_head *head,

++head;
list_for_each_entry_safe(timer, next, head, entry) {
+put_task_struct(timer->task);
timer->task = NULL;
list_del_init(&timer->entry);
if (timer->expires.sched < sched_time) {

@ -71,7 +71,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
-static int maxbatch = 10;
+static int maxbatch = 10000;

#ifndef __HAVE_ARCH_CMPXCHG
/*

@ -109,6 +109,10 @@ void fastcall call_rcu(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
+
+if (unlikely(++rdp->count > 10000))
+set_need_resched();
+
local_irq_restore(flags);
}

@ -140,6 +144,12 @@ void fastcall call_rcu_bh(struct rcu_head *head,
rdp = &__get_cpu_var(rcu_bh_data);
*rdp->nxttail = head;
rdp->nxttail = &head->next;
+rdp->count++;
+/*
+* Should we directly call rcu_do_batch() here ?
+* if (unlikely(rdp->count > 10000))
+* rcu_do_batch(rdp);
+*/
local_irq_restore(flags);
}

@ -157,6 +167,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
next = rdp->donelist = list->next;
list->func(list);
list = next;
+rdp->count--;
if (++count >= maxbatch)
break;
}

|
@ -570,6 +570,7 @@ void getnstimeofday(struct timespec *tv)
|
||||||
tv->tv_sec = x.tv_sec;
|
tv->tv_sec = x.tv_sec;
|
||||||
tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
|
tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
|
||||||
}
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(getnstimeofday);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if (BITS_PER_LONG < 64)
|
#if (BITS_PER_LONG < 64)
|
||||||
|
|
13 mm/vmscan.c
@ -511,10 +511,11 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
* PageDirty _after_ making sure that the page is freeable and
* not in use by anybody. (pagecache + us == 2)
*/
-if (page_count(page) != 2 || PageDirty(page)) {
-write_unlock_irq(&mapping->tree_lock);
-goto keep_locked;
-}
+if (unlikely(page_count(page) != 2))
+goto cannot_free;
+smp_rmb();
+if (unlikely(PageDirty(page)))
+goto cannot_free;

#ifdef CONFIG_SWAP
if (PageSwapCache(page)) {

@ -538,6 +539,10 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
__pagevec_release_nonlru(&freed_pvec);
continue;

+cannot_free:
+write_unlock_irq(&mapping->tree_lock);
+goto keep_locked;
+
activate_locked:
SetPageActive(page);
pgactivate++;

@ -975,7 +975,6 @@ replace_table(struct ip6t_table *table,
struct ip6t_entry *table_base;
unsigned int i;

-for (i = 0; i < num_possible_cpus(); i++) {
for_each_cpu(i) {
table_base =
(void *)newinfo->entries