Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
	net/bridge/br_multicast.c
	net/ipv6/sit.c

The conflicts were minor:

1) sit.c changes overlap with change to ip_tunnel_xmit() signature.

2) br_multicast.c had an overlap between computing max_delay using
   msecs_to_jiffies and turning MLDV2_MRC() into an inline function
   with a name using lowercase instead of uppercase letters.

3) stmmac had two overlapping changes, one which conditionally
   allocated and hooked up a dma_cfg based upon the presence of the
   pbl OF property, and another one handling store-and-forward DMA
   mode. The latter of which should not go into the new
   of_find_property() basic block.

Signed-off-by: David S. Miller <davem@davemloft.net>
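For reference on conflict 2, the change being merged is a macro-to-inline
conversion of the MLDv2 Maximum Response Code handling. A minimal sketch of
the shape of such a conversion (decoding per RFC 3810, section 5.1.3; the
exact br_multicast.c helper differs, so treat the names and types here as
illustrative only):

	/* Before: an uppercase macro, no type checking of its argument. */
	#define MLDV2_MRC(value) ...

	/*
	 * After: a lowercase static inline. Per RFC 3810 5.1.3, codes
	 * below 32768 are the delay itself; larger codes pack a 3-bit
	 * exponent and 12-bit mantissa.
	 */
	static inline unsigned long mldv2_mrc(u16 mrc)
	{
		if (mrc < 32768)
			return mrc;
		return ((mrc & 0x0fff) | 0x1000) << (((mrc >> 12) & 0x7) + 3);
	}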
commit 06c54055be
143 changed files with 1487 additions and 688 deletions
@@ -6067,7 +6067,7 @@ M:	Rob Herring <rob.herring@calxeda.com>
 M:	Pawel Moll <pawel.moll@arm.com>
 M:	Mark Rutland <mark.rutland@arm.com>
 M:	Stephen Warren <swarren@wwwdotorg.org>
-M:	Ian Campbell <ian.campbell@citrix.com>
+M:	Ian Campbell <ijc+devicetree@hellion.org.uk>
 L:	devicetree@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/
Makefile (2 changes)

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed... */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
@@ -89,7 +89,8 @@ void set_fiq_handler(void *start, unsigned int length)
 
 	memcpy(base + offset, start, length);
 	if (!cache_is_vipt_nonaliasing())
-		flush_icache_range(base + offset, offset + length);
+		flush_icache_range((unsigned long)base + offset, offset +
+				   length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 }
 
@@ -82,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
 	crash_save_cpu(&regs, smp_processor_id());
+	flush_cache_all();
 
 	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
 	while (1)
 		cpu_relax();
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = {
 
 DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io         = sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.init_late	= sirfsoc_init_late,
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = {
 
 DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io         = sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.dma_zone_size	= SZ_256M,
@@ -809,15 +809,18 @@ config KUSER_HELPERS
 	  the CPU type fitted to the system. This permits binaries to be
 	  run on ARMv4 through to ARMv7 without modification.
 
 	  See Documentation/arm/kernel_user_helpers.txt for details.
 
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return orientated programming) authors when creating
+	  exploits.
+
 	  If all of the binaries and libraries which run on your platform
 	  are built specifically for your platform, and make no use of
-	  these helpers, then you can turn this option off.  However,
-	  when such an binary or library is run, it will receive a SIGILL
-	  signal, which will terminate the program.
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.
 
 	  Say N here only if you are absolutely certain that you do not
 	  need these helpers; otherwise, the safe option is to say Y.
@@ -979,6 +979,7 @@ config RELOCATABLE
 	  must live at a different physical address than the primary
 	  kernel.
 
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
 config PAGE_OFFSET
 	hex
 	default "0xc000000000000000"
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
 #endif
+#endif
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
@@ -35,7 +35,13 @@
 #include <asm/vdso_datapage.h>
 #include <asm/vio.h>
 #include <asm/mmu.h>
+#include <asm/machdep.h>
+
 
+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
 #define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
 
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
 {
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 
-	if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+	if (firmware_has_feature(FW_FEATURE_LPAR) &&
+	    plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
 		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
 }
 
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations lparcfg_fops = {
-	.owner		= THIS_MODULE,
 	.read		= seq_read,
 	.write		= lparcfg_write,
 	.open		= lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
 	}
 	return 0;
 }
-
-static void __exit lparcfg_cleanup(void)
-{
-	remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		device->cap._DDC = 1;
 	}
 
-	if (acpi_video_init_brightness(device))
-		return;
-
 	if (acpi_video_backlight_support()) {
 		struct backlight_properties props;
 		struct pci_dev *pdev;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		static int count = 0;
 		char *name;
 
+		result = acpi_video_init_brightness(device);
+		if (result)
+			return;
 		name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
 		if (!name)
 			return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		if (result)
 			printk(KERN_ERR PREFIX "Create sysfs link\n");
 
-	} else {
-		/* Remove the brightness object. */
-		kfree(device->brightness->levels);
-		kfree(device->brightness);
-		device->brightness = NULL;
 	}
 }
 
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 
 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;
 
 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;
 
-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;
 
 	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
 		count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 	    (count > ICC_MIN_INT_COUNT_THRESHOLD))
 		ticks = ICC_SAFE_INT_TICKS;
 
-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	iowrite32((count << 24 | ticks), hcr_base + ICC);
 
 	intr_coalescing_count = count;
 	intr_coalescing_ticks = ticks;
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);
 
 	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
 		intr_coalescing_count, intr_coalescing_ticks);
@@ -86,11 +86,11 @@ struct ecx_plat_data {
 
 #define SGPIO_SIGNALS		3
 #define ECX_ACTIVITY_BITS	0x300000
-#define ECX_ACTIVITY_SHIFT	2
+#define ECX_ACTIVITY_SHIFT	0
 #define ECX_LOCATE_BITS		0x80000
 #define ECX_LOCATE_SHIFT	1
 #define ECX_FAULT_BITS		0x400000
-#define ECX_FAULT_SHIFT		0
+#define ECX_FAULT_SHIFT		2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
 				  u32 shift)
 {
@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev,
 		container_of(dev, struct memory_block, dev);
 
 	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
 		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 	}
@@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 	}
 
 	if (!rbnode->blklen) {
-		rbnode->blklen = sizeof(*rbnode);
+		rbnode->blklen = 1;
 		rbnode->base_reg = reg;
 	}
 
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
 					  &status))
 		goto log_fail;
 
-	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+	while ((status == SDVO_CMD_STATUS_PENDING ||
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
 		udelay(15);
 		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
 					      SDVO_I2C_CMD_STATUS,
@@ -752,6 +752,8 @@
 					   will not assert AGPBUSY# and will only
 					   be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING		(1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE		(1<<9)
+#define   INSTPM_SYNC_FLUSH		(1<<5)
 #define ACTHD		0x020c8
 #define FW_BLC		0x020d8
 #define FW_BLC2		0x020dc
@@ -4438,7 +4440,7 @@
 #define  EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
 #define  EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
 #define  EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
-#define  EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+#define  EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e <<22)
 
 /* legacy values */
 #define  EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
 static int
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	u32 splitoff;
 	u32 s, e;
 
+	BUG_ON(!type);
+
 	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	struct nouveau_mm_node *prev, *this, *next;
 	u32 mask = align - 1;
 
+	BUG_ON(!type);
+
 	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
 		u32 e = this->offset + this->length;
 		u32 s = this->offset;
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,d) \
-	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({ \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
 })
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
 })
 
 int  nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-			struct nouveau_oclass *, int, void **);
+			struct nouveau_oclass *, const struct nouveau_mc_intr *,
+			int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int  _nouveau_mc_init(struct nouveau_object *);
 int  _nouveau_mc_fini(struct nouveau_object *, bool);
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;
 
 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}
 
-	pfb->ram->size  = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	pfb->ram->tags  = nv_rd32(pfb, 0x100320);
+	ram->size  = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->tags  = nv_rd32(pfb, 0x100320);
 	return 0;
 }
 
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->type = NV_MEM_TYPE_STOLEN;
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->type = NV_MEM_TYPE_STOLEN;
 	return 0;
 }
 
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
 	struct nouveau_ltcg base;
 	u32 part_nr;
 	u32 subp_nr;
-	struct nouveau_mm tags;
 	u32 num_tags;
+	u32 tag_base;
+	struct nouveau_mm tags;
 	struct nouveau_mm_node *tag_ram;
 };
 
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	u32 tag_size, tag_margin, tag_align;
 	int ret;
 
-	nv_wr32(priv, 0x17e8d8, priv->part_nr);
-	if (nv_device(pfb)->card_type >= NV_E0)
-		nv_wr32(priv, 0x17e000, priv->part_nr);
-
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
 	priv->num_tags = (pfb->ram->size >> 17) / 4;
 	if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	tag_size += tag_align;
 	tag_size  = (tag_size + 0xfff) >> 12; /* round up */
 
-	ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+	ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
 			      &priv->tag_ram);
 	if (ret) {
 		priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 		tag_base += tag_align - 1;
 		ret = do_div(tag_base, tag_align);
 
-		nv_wr32(priv, 0x17e8d4, tag_base);
+		priv->tag_base = tag_base;
 	}
 	ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
 
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	}
 	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
 
-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
 	ret = nvc0_ltcg_init_tag_ram(pfb, priv);
 	if (ret)
 		return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
 	nouveau_ltcg_destroy(ltcg);
 }
 
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+	struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	int ret;
+
+	ret = nouveau_ltcg_init(ltcg);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	nv_wr32(priv, 0x17e8d8, priv->part_nr);
+	if (nv_device(ltcg)->card_type >= NV_E0)
+		nv_wr32(priv, 0x17e000, priv->part_nr);
+	nv_wr32(priv, 0x17e8d4, priv->tag_base);
+	return 0;
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
 	.handle = NV_SUBDEV(LTCG, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_ltcg_ctor,
 		.dtor = nvc0_ltcg_dtor,
-		.init = _nouveau_ltcg_init,
+		.init = nvc0_ltcg_init,
 		.fini = _nouveau_ltcg_fini,
 	},
 };
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, int length, void **pobject)
+		   struct nouveau_oclass *oclass,
+		   const struct nouveau_mc_intr *intr_map,
+		   int length, void **pobject)
 {
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
+	pmc->intr_map = intr_map;
+
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
 			  IRQF_SHARED, "nouveau", pmc);
 	if (ret < 0)
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv04_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv44_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv04_mc_intr;
 	return 0;
 }
 
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv50_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv50_mc_intr;
 	return 0;
 }
 
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv98_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nv98_mc_intr;
 	return 0;
 }
 
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvc0_mc_priv *priv;
 	int ret;
 
-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
-	priv->base.intr_map = nvc0_mc_intr;
 	return 0;
 }
 
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 		regp->ramdac_a34 = 0x1;
 }
 
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret == 0) {
+		if (disp->image[nv_crtc->index])
+			nouveau_bo_unpin(disp->image[nv_crtc->index]);
+		nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+	}
+
+	return ret;
+}
+
 /**
  * Sets up registers for the given mode/adjusted_mode pair.
  *
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	int ret;
 
 	NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
 	drm_mode_debug_printmodeline(adjusted_mode);
 
+	ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
 	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
 	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
 
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
 {
+	struct nv04_display *disp = nv04_display(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
 	if (!nv_crtc)
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 
 	drm_crtc_cleanup(crtc);
 
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -753,6 +781,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
 	nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
 }
 
+static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
 static void
 nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 		  uint32_t size)
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	struct drm_framebuffer *drm_fb;
 	struct nouveau_framebuffer *fb;
 	int arb_burst, arb_lwm;
-	int ret;
 
 	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		return 0;
 	}
 
-
 	/* If atomic, we want to switch to the fb we were passed, so
-	 * now we update pointers to do that.  (We don't pin; just
-	 * assume we're already pinned and update the base address.)
+	 * now we update pointers to do that.
 	 */
 	if (atomic) {
 		drm_fb = passed_fb;
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	} else {
 		drm_fb = crtc->fb;
 		fb = nouveau_framebuffer(crtc->fb);
-		/* If not atomic, we can go ahead and pin, and unpin the
-		 * old fb we were passed.
-		 */
-		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			return ret;
-
-		if (passed_fb) {
-			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-			nouveau_bo_unpin(ofb->nvbo);
-		}
 	}
 
 	nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@ static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb)
 {
+	int ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
 	return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
 
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
 	.mode_set_base = nv04_crtc_mode_set_base,
 	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
 	.load_lut = nv_crtc_gamma_load,
+	.disable = nv_crtc_disable,
 };
 
 int
@@ -81,6 +81,7 @@ struct nv04_display {
 	uint32_t saved_vga_font[4][16384];
 	uint32_t dac_users[4];
 	struct nouveau_object *core;
+	struct nouveau_bo *image[2];
 };
 
 static inline struct nv04_display *
@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		ret = nv50_display_flip_next(crtc, fb, chan, 0);
 		if (ret)
 			goto fail_unreserve;
+	} else {
+		struct nv04_display *dispnv04 = nv04_display(dev);
+		nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
 	}
 
 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
 	if (clk < pll->vco1.max_freq)
 		pll->vco2.max_freq = 0;
 
-	pclk->pll_calc(pclk, pll, clk, &coef);
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
 	if (ret == 0)
 		return -ERANGE;
 
@@ -29,7 +29,9 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_bo_driver.h>
 
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
 
 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 			 struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 {
 	SVGAFifoCmdDefineGMR2 define_cmd;
 	SVGAFifoCmdRemapGMR2 remap_cmd;
-	uint32_t define_size = sizeof(define_cmd) + 4;
-	uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
 	uint32_t *cmd;
 	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
 	uint32_t i;
 
-	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;
 
 	define_cmd.gmrId = gmr_id;
 	define_cmd.numPages = num_pages;
 
+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that goes into the gmr.
+	 */
+
 	remap_cmd.gmrId = gmr_id;
 	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
 		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
-	remap_cmd.offsetPages = 0;
-	remap_cmd.numPages = num_pages;
 
-	*cmd++ = SVGA_CMD_DEFINE_GMR2;
-	memcpy(cmd, &define_cmd, sizeof(define_cmd));
-	cmd += sizeof(define_cmd) / sizeof(uint32);
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;
 
-	*cmd++ = SVGA_CMD_REMAP_GMR2;
-	memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
-	cmd += sizeof(remap_cmd) / sizeof(uint32);
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);
 
-	for (i = 0; i < num_pages; ++i) {
-		if (VMW_PPN_SIZE <= 4)
-			*cmd = page_to_pfn(*pages++);
-		else
-			*((uint64_t *)cmd) = page_to_pfn(*pages++);
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = page_to_pfn(*pages++);
+			else
+				*((uint64_t *)cmd) = page_to_pfn(*pages++);
 
-		cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
 	}
 
-	vmw_fifo_commit(dev_priv, define_size + remap_size);
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);
 
 	return 0;
 }
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		ret = adjd_s311_read_data(indio_dev, chan->address, val);
+		ret = adjd_s311_read_data(indio_dev,
+			ADJD_S311_DATA_REG(chan->address), val);
 		if (ret < 0)
 			return ret;
 		return IIO_VAL_INT;
@@ -167,6 +167,7 @@ static const struct xpad_device {
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse)
  */
 static int elantech_packet_check_v3(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
 	unsigned char *packet = psmouse->packet;
 
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
 	if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
 		return PACKET_DEBOUNCE;
 
-	if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
-		return PACKET_V3_HEAD;
+	/*
+	 * If the hardware flag 'crc_enabled' is set the packets have
+	 * different signatures.
+	 */
+	if (etd->crc_enabled) {
+		if ((packet[3] & 0x09) == 0x08)
+			return PACKET_V3_HEAD;
 
-	if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
-		return PACKET_V3_TAIL;
+		if ((packet[3] & 0x09) == 0x09)
+			return PACKET_V3_TAIL;
+	} else {
+		if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
+			return PACKET_V3_HEAD;
+
+		if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
+			return PACKET_V3_TAIL;
+	}
 
 	return PACKET_UNKNOWN;
 }
 
 static int elantech_packet_check_v4(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	unsigned char *packet = psmouse->packet;
 	unsigned char packet_type = packet[3] & 0x03;
+	bool sanity_check;
+
+	/*
+	 * Sanity check based on the constant bits of a packet.
+	 * The constant bits change depending on the value of
+	 * the hardware flag 'crc_enabled' but are the same for
+	 * every packet, regardless of the type.
+	 */
+	if (etd->crc_enabled)
+		sanity_check = ((packet[3] & 0x08) == 0x00);
+	else
+		sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+				(packet[3] & 0x1c) == 0x10);
+
+	if (!sanity_check)
+		return PACKET_UNKNOWN;
 
 	switch (packet_type) {
 	case 0:
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd)
 		etd->reports_pressure = true;
 	}
 
+	/*
+	 * The signatures of v3 and v4 packets change depending on the
+	 * value of this hardware flag.
+	 */
+	etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+
 	return 0;
 }
 
@@ -129,6 +129,7 @@ struct elantech_data {
 	bool paritycheck;
 	bool jumpy_cursor;
 	bool reports_pressure;
+	bool crc_enabled;
 	unsigned char hw_version;
 	unsigned int fw_version;
 	unsigned int single_finger_reports;
@@ -22,7 +22,8 @@ config SERIO_I8042
 	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
 	default y
 	depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390
+		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
+		   !ARC
 	help
 	  i8042 is the chip over which the standard AT keyboard and PS/2
 	  mouse are connected to the computer. If you use these devices,
@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA =
 	{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 2 };
-static struct wacom_features wacom_features_0xDB =
+static const struct wacom_features wacom_features_0xDB =
 	{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 2 };
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF =
 	{ "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 16 };
+static const struct wacom_features wacom_features_0x300 =
+	{ "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023,
+	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x301 =
+	{ "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
+	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x6004 =
 	{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
 	  0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = {
 	{ USB_DEVICE_WACOM(0x100) },
 	{ USB_DEVICE_WACOM(0x101) },
 	{ USB_DEVICE_WACOM(0x10D) },
+	{ USB_DEVICE_WACOM(0x300) },
+	{ USB_DEVICE_WACOM(0x301) },
 	{ USB_DEVICE_WACOM(0x304) },
 	{ USB_DEVICE_WACOM(0x4001) },
 	{ USB_DEVICE_WACOM(0x47) },
@@ -23,7 +23,7 @@
 #define SIRFSOC_INT_RISC_LEVEL1		0x0024
 #define SIRFSOC_INIT_IRQ_ID		0x0038
 
-#define SIRFSOC_NUM_IRQS		128
+#define SIRFSOC_NUM_IRQS		64
 
 static struct irq_domain *sirfsoc_irqdomain;
 
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
+	int ret;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 
-	gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq);
+	ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
+		handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+
+	gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
+	gc->reg_base = base;
 	ct = gc->chip_types;
-
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
-
-	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
 }
 
 static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p
 	if (!base)
 		panic("unable to map intc cpu registers\n");
 
-	/* using legacy because irqchip_generic does not work with linear */
-	sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0,
-						  &irq_domain_simple_ops, base);
+	sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
+						  &irq_generic_chip_ops, base);
 
 	sirfsoc_alloc_gc(base, 0, 32);
 	sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
 	u8 *data;
 	int len;
 
-	if (skb->len < sizeof(int))
+	if (skb->len < sizeof(int)) {
+		printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
 		return -EINVAL;
+	}
 	cont = *((int *)skb->data);
 	len = skb->len - sizeof(int);
 	data = skb->data + sizeof(int);
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
 
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
-	depends on H8300
+	depends on H8300H_AKI3068NET || H8300H_H8MAX
 	---help---
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
 	int old_max_eth_txqs, new_max_eth_txqs;
 	int old_txdata_index = 0, new_txdata_index = 0;
+	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
 
 	/* Copy the NAPI object as it has been already initialized */
 	from_fp->napi = to_fp->napi;
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
 
+	/* Retain the tpa_info of the original `to' version as we don't want
+	 * 2 FPs to contain the same tpa_info pointer.
+	 */
+	to_fp->tpa_info = old_tpa_info;
+
 	/* move sp_objs contents as well, as their indices match fp ones */
 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
 
@@ -2959,8 +2965,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	if (IS_PF(bp)) {
 		if (CNIC_LOADED(bp))
 			bnx2x_free_mem_cnic(bp);
-		bnx2x_free_mem(bp);
 	}
+	bnx2x_free_mem(bp);
 
 	bp->state = BNX2X_STATE_CLOSED;
 	bp->cnic_loaded = false;
@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		if (vars->line_speed == SPEED_AUTO_NEG &&
 		    (CHIP_IS_E1x(bp) ||
-		     CHIP_IS_E2(bp)))
+		     CHIP_IS_E2(bp))) {
 			bnx2x_set_parallel_detection(phy, params);
+			if (params->phy[INT_PHY].config_init)
+				params->phy[INT_PHY].config_init(phy,
+								 params,
+								 vars);
+		}
 	}
 
 	/* Init external phy*/
@@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
 {
 	int i;
 
-	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
-		       sizeof(struct host_sp_status_block));
-
 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
 
+	if (IS_VF(bp))
+		return;
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_sp_status_block));
+
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));
 
@@ -545,23 +545,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
 	return 0;
 }
 
-static int
-bnx2x_vfop_config_vlan0(struct bnx2x *bp,
-			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
-			bool add)
-{
-	int rc;
-
-	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
-				       BNX2X_VLAN_MAC_DEL;
-	vlan_mac->user_req.u.vlan.vlan = 0;
-
-	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-	if (rc == -EEXIST)
-		rc = 0;
-	return rc;
-}
-
 static int bnx2x_vfop_config_list(struct bnx2x *bp,
 				  struct bnx2x_vfop_filters *filters,
 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@@ -666,30 +649,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
 		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
-
-		/* remove vlan0 - could be no-op */
-		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
-		if (vfop->rc)
-			goto op_err;
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
 
-		/* Do vlan list config. if this operation fails we try to
-		 * restore vlan0 to keep the queue is working order
-		 */
+		/* do list config */
 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
 		if (!vfop->rc) {
 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
 		}
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
-
-	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
-		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-		if (list_empty(&obj->head))
-			/* add vlan0 */
-			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
 	default:
@@ -2833,6 +2800,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 	return 0;
 }
 
+struct set_vf_state_cookie {
+	struct bnx2x_virtf *vf;
+	u8 state;
+};
+
+void bnx2x_set_vf_state(void *cookie)
+{
+	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+	p->vf->state = p->state;
+}
+
 /* VFOP close (teardown the queues, delete mcasts and close HW) */
 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -2883,7 +2862,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 op_err:
 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
 op_done:
-	vf->state = VF_ACQUIRED;
+
+	/* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer which it will free
+	 * as soon as we return from the close flow.
+	 */
+	{
+		struct set_vf_state_cookie cookie;
+
+		cookie.vf = vf;
+		cookie.state = VF_ACQUIRED;
+		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+	}
+
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
 	bnx2x_vfop_end(bp, vf, vfop);
 }
@@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 /* should be called under stats_sema */
 static void __bnx2x_stats_start(struct bnx2x *bp)
 {
-	/* vfs travel through here as part of the statistics FSM, but no action
-	 * is required
-	 */
-	if (IS_VF(bp))
-		return;
-
-	if (bp->port.pmf)
-		bnx2x_port_stats_init(bp);
+	if (IS_PF(bp)) {
+		if (bp->port.pmf)
+			bnx2x_port_stats_init(bp);
 
-	else if (bp->func_stx)
-		bnx2x_func_stats_init(bp);
+		else if (bp->func_stx)
+			bnx2x_func_stats_init(bp);
 
-	bnx2x_hw_stats_post(bp);
-	bnx2x_storm_stats_post(bp);
+		bnx2x_hw_stats_post(bp);
+		bnx2x_storm_stats_post(bp);
+	}
 
 	bp->stats_started = true;
 }
@@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
 			  estats->mac_discard);
 	}
 }
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie){
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	bnx2x_stats_comp(bp);
+	func_to_exec(cookie);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}
@@ -539,6 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie);
 
 /**
  * bnx2x_save_statistics - save statistics when unloading.
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
 	return false;
 }
 
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
 	u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		}
 		return;
 	} else if (do_low_power) {
-		tg3_writephy(tp, MII_TG3_EXT_CTRL,
-			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
 
 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
 	/* Receive errors */
 	unsigned long rx_watchdog;
 	unsigned long rx_da_filter_fail;
-	unsigned long rx_sa_filter_fail;
 	unsigned long rx_payload_error;
 	unsigned long rx_ip_header_error;
 	/* Tx/Rx IRQ errors */
-	unsigned long tx_undeflow;
 	unsigned long tx_process_stopped;
 	unsigned long rx_buf_unav;
 	unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
 	char rx_pause;
 	char tx_pause;
 	int wolopts;
+	struct work_struct tx_timeout_work;
 };
 
 /* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)
 
+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 
 static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
 {
-	u32 len = cpu_to_le32(p->flags);
+	u32 len = le32_to_cpu(p->buf_size);
 
 	return (len & DESC_BUFFER1_SZ_MASK) +
 		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
 	p->flags = cpu_to_le32(tmpflags);
 }
 
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+	u32 tmpflags = le32_to_cpu(p->flags);
+	tmpflags &= TXDESC_END_RING;
+	p->flags = cpu_to_le32(tmpflags);
+}
+
 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
 }
 
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 {
 	u32 data;
 
-	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
-	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
-	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
-	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	if (addr) {
+		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	} else {
+		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+	}
 }
 
 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		if (unlikely(skb == NULL))
 			break;
 
-		priv->rx_skbuff[entry] = skb;
 		paddr = dma_map_single(priv->device, skb->data,
-				       bufsz, DMA_FROM_DEVICE);
+				       priv->dma_buf_sz - NET_IP_ALIGN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(priv->device, paddr)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		priv->rx_skbuff[entry] = skb;
 		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
 	}
 
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
 		return;
 
 	for (i = 0; i < DMA_RX_RING_SZ; i++) {
-		if (priv->rx_skbuff[i] == NULL)
+		struct sk_buff *skb = priv->rx_skbuff[i];
+		if (skb == NULL)
 			continue;
 
 		p = priv->dma_rx + i;
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
 		priv->rx_skbuff[i] = NULL;
 	}
 }
 
 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 {
-	int i, f;
+	int i;
 	struct xgmac_dma_desc *p;
 
 	if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 			continue;
 
 		p = priv->dma_tx + i;
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
-			p = priv->dma_tx + i++;
+		if (desc_get_tx_fs(p))
 			dma_unmap_single(priv->device, desc_get_buf_addr(p),
 					 desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
+		else
+			dma_unmap_page(priv->device, desc_get_buf_addr(p),
+				       desc_get_buf_len(p), DMA_TO_DEVICE);
 
-		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		if (desc_get_tx_ls(p))
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
 		priv->tx_skbuff[i] = NULL;
 	}
 }
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
  */
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
-	int i;
-
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		if (desc_get_owner(p))
 			break;
 
-		/* Verify tx error by looking at the last segment */
-		if (desc_get_tx_ls(p))
-			desc_get_tx_status(priv, p);
-
 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
 			   priv->tx_head, priv->tx_tail);
 
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
+			dma_unmap_page(priv->device, desc_get_buf_addr(p),
+				       desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		/* Check tx error on the last segment */
+		if (desc_get_tx_ls(p)) {
+			desc_get_tx_status(priv, p);
+			dev_kfree_skb(skb);
+		}
 
 		priv->tx_skbuff[entry] = NULL;
 		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
-		if (!skb) {
-			continue;
-		}
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
-							      DMA_TX_RING_SZ);
-			p = priv->dma_tx + priv->tx_tail;
-
-			dma_unmap_page(priv->device, desc_get_buf_addr(p),
-				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
-
-		dev_kfree_skb(skb);
 	}
 
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }
 
 /**
  * xgmac_tx_err:
  * @priv: pointer to the private device structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
 {
-	u32 reg, value, inten;
+	u32 reg, value;
+	struct xgmac_priv *priv =
+		container_of(work, struct xgmac_priv, tx_timeout_work);
 
-	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);
 
-	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_lock(priv->dev);
+
 	reg = readl(priv->base + XGMAC_DMA_CONTROL);
 	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
 	do {
@@ -927,9 +939,15 @@ static void xgmac_tx_timeout_work(struct work_struct *work)
 
 	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
 		priv->base + XGMAC_DMA_STATUS);
-	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
 
+	netif_tx_unlock(priv->dev);
 	netif_wake_queue(priv->dev);
+
+	napi_enable(&priv->napi);
+
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
 }
 
 static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
 		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
 
-	/* Enable interrupts */
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
 
 	/* Mask power mgt interrupt */
 	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);
 
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
 	return 0;
 }
 
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->device, paddr)) {
 		dev_kfree_skb(skb);
-		return -EIO;
+		return NETDEV_TX_OK;
 	}
 	priv->tx_skbuff[entry] = skb;
 	desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
 					 DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, paddr)) {
-			dev_kfree_skb(skb);
-			return -EIO;
-		}
+		if (dma_mapping_error(priv->device, paddr))
+			goto dma_err;
 
 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
 		desc = priv->dma_tx + entry;
-		priv->tx_skbuff[entry] = NULL;
+		priv->tx_skbuff[entry] = skb;
 
 		desc_set_buf_addr_and_size(desc, paddr, len);
 		if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
 
-	writel(1, priv->base + XGMAC_DMA_TX_POLL);
-
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
 
+	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+	    MAX_SKB_FRAGS)
	/* Ensure tx_head update is visible to tx completion
|
||||
smp_mb();
|
||||
if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
|
||||
netif_stop_queue(dev);
|
||||
/* Ensure netif_stop_queue is visible to tx completion */
|
||||
smp_mb();
|
||||
if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
|
||||
netif_start_queue(dev);
|
||||
}
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
dma_err:
|
||||
entry = priv->tx_head;
|
||||
for ( ; i > 0; i--) {
|
||||
entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
|
||||
desc = priv->dma_tx + entry;
|
||||
priv->tx_skbuff[entry] = NULL;
|
||||
dma_unmap_page(priv->device, desc_get_buf_addr(desc),
|
||||
desc_get_buf_len(desc), DMA_TO_DEVICE);
|
||||
desc_clear_tx_owner(desc);
|
||||
}
|
||||
desc = first;
|
||||
dma_unmap_single(priv->device, desc_get_buf_addr(desc),
|
||||
desc_get_buf_len(desc), DMA_TO_DEVICE);
|
||||
dev_kfree_skb(skb);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
|
@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
|
|||
|
||||
skb_put(skb, frame_len);
|
||||
dma_unmap_single(priv->device, desc_get_buf_addr(p),
|
||||
frame_len, DMA_FROM_DEVICE);
|
||||
priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
|
||||
|
||||
skb->protocol = eth_type_trans(skb, priv->dev);
|
||||
skb->ip_summed = ip_checksum;
|
||||
|
@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
|
|||
static void xgmac_tx_timeout(struct net_device *dev)
|
||||
{
|
||||
struct xgmac_priv *priv = netdev_priv(dev);
|
||||
|
||||
/* Clear Tx resources and restart transmitting again */
|
||||
xgmac_tx_err(priv);
|
||||
schedule_work(&priv->tx_timeout_work);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
|
|||
if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
|
||||
use_hash = true;
|
||||
value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
|
||||
} else {
|
||||
use_hash = false;
|
||||
}
|
||||
netdev_for_each_mc_addr(ha, dev) {
|
||||
if (use_hash) {
|
||||
|
@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
|
|||
}
|
||||
|
||||
out:
|
||||
for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
|
||||
xgmac_set_mac_addr(ioaddr, NULL, reg);
|
||||
for (i = 0; i < XGMAC_NUM_HASH; i++)
|
||||
writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
|
||||
|
||||
|
@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
|
|||
static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
u32 intr_status;
|
||||
bool tx_err = false;
|
||||
struct net_device *dev = (struct net_device *)dev_id;
|
||||
struct xgmac_priv *priv = netdev_priv(dev);
|
||||
struct xgmac_extra_stats *x = &priv->xstats;
|
||||
|
@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
|
|||
if (intr_status & DMA_STATUS_TPS) {
|
||||
netdev_err(priv->dev, "transmit process stopped\n");
|
||||
x->tx_process_stopped++;
|
||||
tx_err = true;
|
||||
schedule_work(&priv->tx_timeout_work);
|
||||
}
|
||||
if (intr_status & DMA_STATUS_FBI) {
|
||||
netdev_err(priv->dev, "fatal bus error\n");
|
||||
x->fatal_bus_error++;
|
||||
tx_err = true;
|
||||
}
|
||||
|
||||
if (tx_err)
|
||||
xgmac_tx_err(priv);
|
||||
}
|
||||
|
||||
/* TX/RX NORMAL interrupts */
|
||||
|
@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
|
|||
XGMAC_STAT(rx_payload_error),
|
||||
XGMAC_STAT(rx_ip_header_error),
|
||||
XGMAC_STAT(rx_da_filter_fail),
|
||||
XGMAC_STAT(rx_sa_filter_fail),
|
||||
XGMAC_STAT(fatal_bus_error),
|
||||
XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
|
||||
XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
|
||||
|
@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
|
|||
ndev->netdev_ops = &xgmac_netdev_ops;
|
||||
SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
|
||||
spin_lock_init(&priv->stats_lock);
|
||||
INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
|
||||
|
||||
priv->device = &pdev->dev;
|
||||
priv->dev = ndev;
|
||||
|
@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
|
|||
if (device_can_wakeup(priv->device))
|
||||
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
|
||||
|
||||
ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
|
||||
ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
|
||||
if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
|
||||
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
||||
NETIF_F_RXCSUM;
|
||||
|
|
|
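Note: the stop/wake rework above relies on pairing a barrier after the producer publishes tx_head with a barrier after the consumer retires tx_tail, plus a re-check after stopping the queue. A minimal userspace sketch of that protocol, with smp_mb() approximated by a C11 full fence; the ring, the space helper, and all names here are illustrative stand-ins, not the driver's API:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define RING_SZ   256
    #define MAX_FRAGS 18                 /* stand-in for MAX_SKB_FRAGS */

    static _Atomic unsigned head, tail;  /* producer / consumer indices */
    static atomic_bool queue_stopped;

    static unsigned ring_space(void)
    {
        /* free slots between producer head and consumer tail */
        return RING_SZ - (atomic_load(&head) - atomic_load(&tail));
    }

    /* producer side, shaped like the xgmac_xmit() hunk */
    static void xmit_one(void)
    {
        atomic_fetch_add(&head, 1);                    /* publish descriptor */
        atomic_thread_fence(memory_order_seq_cst);     /* ~smp_mb() */
        if (ring_space() <= MAX_FRAGS) {
            atomic_store(&queue_stopped, true);
            atomic_thread_fence(memory_order_seq_cst);
            /* re-check: completion may have freed slots meanwhile */
            if (ring_space() > MAX_FRAGS)
                atomic_store(&queue_stopped, false);
        }
    }

    /* consumer side, shaped like the xgmac_tx_complete() hunk */
    static void complete_one(void)
    {
        atomic_fetch_add(&tail, 1);                    /* retire descriptor */
        atomic_thread_fence(memory_order_seq_cst);     /* ~smp_mb() */
        if (atomic_load(&queue_stopped) && ring_space() > MAX_FRAGS)
            atomic_store(&queue_stopped, false);       /* wake the queue */
    }

Without the fences, the producer could stop the queue after missing a concurrent tail update, and the consumer could miss the stop, leaving the queue stopped forever.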
@ -4476,6 +4476,10 @@ static int be_resume(struct pci_dev *pdev)
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)

@ -296,6 +296,9 @@ struct fec_enet_private {
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	unsigned short tx_ring_size;
	unsigned short rx_ring_size;

	struct	platform_device *pdev;

	int	opened;

@ -238,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

static int mii_cnt;

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
static inline
struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
	if (is_ex)
		return (struct bufdesc *)(ex + 1);
	struct bufdesc *new_bd = bdp + 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= fep->tx_bd_base) {
		base = fep->tx_bd_base;
		ring_size = fep->tx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
	} else {
		base = fep->rx_bd_base;
		ring_size = fep->rx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
			ex_base : ex_new_bd);
	else
		return bdp + 1;
		return (new_bd >= (base + ring_size)) ?
			base : new_bd;
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
static inline
struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
	if (is_ex)
		return (struct bufdesc *)(ex - 1);
	struct bufdesc *new_bd = bdp - 1;
	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
	struct bufdesc_ex *ex_base;
	struct bufdesc *base;
	int ring_size;

	if (bdp >= fep->tx_bd_base) {
		base = fep->tx_bd_base;
		ring_size = fep->tx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
	} else {
		base = fep->rx_bd_base;
		ring_size = fep->rx_ring_size;
		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
	}

	if (fep->bufdesc_ex)
		return (struct bufdesc *)((ex_new_bd < ex_base) ?
			(ex_new_bd + ring_size) : ex_new_bd);
	else
		return bdp - 1;
		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}

static void *swap_buffer(void *bufaddr, int len)

@ -379,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
		}
	}

	bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
		fep->delay_work.trig_tx = true;

@ -388,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
	}

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
	bdp = fec_enet_get_nextdesc(bdp, fep);

	fep->cur_tx = bdp;

@ -416,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
	for (i = 0; i < fep->rx_ring_size; i++) {

		/* Initialize the BD for every fragment in the page. */
		if (bdp->cbd_bufaddr)
			bdp->cbd_sc = BD_ENET_RX_EMPTY;
		else
			bdp->cbd_sc = 0;
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap */
	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;

	fep->cur_rx = fep->rx_bd_base;

@ -435,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	fep->cur_tx = bdp;
	for (i = 0; i < TX_RING_SIZE; i++) {
	for (i = 0; i < fep->tx_ring_size; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;

@ -444,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
			fep->tx_skbuff[i] = NULL;
		}
		bdp->cbd_bufaddr = 0;
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap */
	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;
	fep->dirty_tx = bdp;
}

@ -509,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	if (fep->bufdesc_ex)
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
	else
		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);


	for (i = 0; i <= TX_RING_MOD_MASK; i++) {

@ -726,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
	bdp = fep->dirty_tx;

	/* get next bdp of dirty_tx */
	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
	bdp = fec_enet_get_nextdesc(bdp, fep);

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {

@ -799,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
		fep->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		bdp = fec_enet_get_nextdesc(bdp, fep);

		/* Since we have freed up a buffer, the ring is no longer full
		 */

@ -970,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
					       htons(ETH_P_8021Q),
					       vlan_tag);

			if (!skb_defer_rx_timestamp(skb))
				napi_gro_receive(&fep->napi, skb);
			napi_gro_receive(&fep->napi, skb);
		}

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,

@ -993,10 +1018,8 @@ fec_enet_rx(struct net_device *ndev, int budget)
		}

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		bdp = fec_enet_get_nextdesc(bdp, fep);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.

@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
	struct bufdesc	*bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
	for (i = 0; i < fep->rx_ring_size; i++) {
		skb = fep->rx_skbuff[i];

		if (bdp->cbd_bufaddr)

@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
					 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
		if (skb)
			dev_kfree_skb(skb);
		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++)
	for (i = 0; i < fep->tx_ring_size; i++)
		kfree(fep->tx_bounce[i]);
}

@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
	struct bufdesc	*bdp;

	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {
	for (i = 0; i < fep->rx_ring_size; i++) {
		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
		if (!skb) {
			fec_enet_free_buffers(ndev);

@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
			ebdp->cbd_esc = BD_ENET_RX_INT;
		}

		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {
	for (i = 0; i < fep->tx_ring_size; i++) {
		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

		bdp->cbd_sc = 0;

@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
			ebdp->cbd_esc = BD_ENET_TX_INT;
		}

		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		bdp = fec_enet_get_nextdesc(bdp, fep);
	}

	/* Set the last buffer to wrap. */
	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
	bdp = fec_enet_get_prevdesc(bdp, fep);
	bdp->cbd_sc |= BD_SC_WRAP;

	return 0;

@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
	/* Get the Ethernet address */
	fec_get_mac(ndev);

	/* init the tx & rx ring size */
	fep->tx_ring_size = TX_RING_SIZE;
	fep->rx_ring_size = RX_RING_SIZE;

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	if (fep->bufdesc_ex)
		fep->tx_bd_base = (struct bufdesc *)
			(((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
	else
		fep->tx_bd_base = cbd_base + RX_RING_SIZE;
		fep->tx_bd_base = cbd_base + fep->rx_ring_size;

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;

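Note: the FEC hunks fold the per-caller WRAP-bit test into the next/prev helpers, which wrap purely on ring base and size. The core of that wrap logic, condensed to a sketch with an illustrative stub type:

    #include <stddef.h>

    struct bufdesc { unsigned short cbd_sc; };   /* illustrative stub */

    /* Advance within a descriptor ring, wrapping at base + ring_size;
     * this mirrors the shape of the new fec_enet_get_nextdesc() helper
     * for the non-extended-descriptor case.
     */
    static struct bufdesc *ring_next(struct bufdesc *bdp,
                                     struct bufdesc *base, size_t ring_size)
    {
        struct bufdesc *next = bdp + 1;

        return (next >= base + ring_size) ? base : next;
    }

Keying the wrap on pointer arithmetic rather than a status bit is what lets the ring sizes become runtime fields (tx_ring_size/rx_ring_size) instead of compile-time constants.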
@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
		jwrite32(jme, JME_APMC, apmc);
	}

	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)

	spin_lock_init(&jme->phy_lock);
	spin_lock_init(&jme->macaddr_lock);

@ -138,7 +138,9 @@
#define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
#define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
#define MVNETA_MIB_COUNTERS_BASE                 0x3080
#define      MVNETA_MIB_LATE_COLLISION           0x7c
#define MVNETA_DA_FILT_SPEC_MCAST                0x3400

@ -948,6 +950,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

@ -2340,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

@ -2473,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!pp->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
	if (!ret)
		mvneta_adjust_link(dev);

	return ret;
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */

@ -2591,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_tx_timeout      = mvneta_tx_timeout,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

const struct ethtool_ops mvneta_eth_tool_ops = {

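Note: wiring up .ndo_do_ioctl to phy_mii_ioctl() makes the standard MII ioctls reach the PHY. A hedged userspace sketch of what that enables, using the classic mii-tool idiom; the interface name is illustrative:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/mii.h>

    int main(void)
    {
        struct ifreq ifr;
        /* mii_ioctl_data overlays the ifr_ifru union, as mii-tool does */
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* illustrative name */

        if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {       /* fills mii->phy_id */
            mii->reg_num = MII_BMSR;                   /* basic status reg */
            if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
                printf("BMSR = 0x%04x\n", mii->val_out);
        }
        close(fd);
        return 0;
    }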
@ -1171,7 +1171,6 @@ typedef struct {

#define NETXEN_DB_MAPSIZE_BYTES    	0x1000

#define NETXEN_NETDEV_WEIGHT	128
#define NETXEN_ADAPTER_UP_MAGIC 777
#define NETXEN_NIC_PEG_TUNE 0

@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_add(netdev, &sds_ring->napi,
				netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
				netxen_nic_poll, NAPI_POLL_WEIGHT);
	}

	return 0;

@ -1348,7 +1348,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
					       DMA_FROM_DEVICE);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			netif_receive_skb(skb);
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += pkt_len;
		}

@ -1906,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)

	pm_runtime_get_sync(&mdp->pdev->dev);

	napi_enable(&mdp->napi);

	ret = request_irq(ndev->irq, sh_eth_interrupt,
			  mdp->cd->irq_flags, ndev->name, ndev);
	if (ret) {
		dev_err(&ndev->dev, "Can not assign IRQ number\n");
		return ret;
		goto out_napi_off;
	}

	/* Descriptor set */

@ -1928,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
	if (ret)
		goto out_free_irq;

	napi_enable(&mdp->napi);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
out_napi_off:
	napi_disable(&mdp->napi);
	pm_runtime_put_sync(&mdp->pdev->dev);
	return ret;
}

@ -2025,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	napi_disable(&mdp->napi);

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */

@ -2044,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)

	free_irq(ndev->irq, ndev);

	napi_disable(&mdp->napi);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

@ -71,19 +71,22 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
		plat->force_sf_dma_mode = 1;
	}

	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
	if (!dma_cfg)
		return -ENOMEM;

	plat->dma_cfg = dma_cfg;
	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
	if (of_find_property(np, "snps,pbl", NULL)) {
		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
				       GFP_KERNEL);
		if (!dma_cfg)
			return -ENOMEM;
		plat->dma_cfg = dma_cfg;
		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
		dma_cfg->fixed_burst =
			of_property_read_bool(np, "snps,fixed-burst");
		dma_cfg->mixed_burst =
			of_property_read_bool(np, "snps,mixed-burst");
	}
	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
	if (plat->force_thresh_dma_mode) {
		plat->force_sf_dma_mode = 0;
		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
	}

	return 0;
}

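Note: the point of the of_find_property() guard above is that an absent optional property should leave plat->dma_cfg NULL so the driver falls back to its defaults, rather than handing it a zero-filled config. A condensed sketch of that pattern, reusing the driver's structure names but with the function boundary invented for illustration:

    #include <linux/device.h>
    #include <linux/of.h>
    #include <linux/slab.h>
    #include <linux/stmmac.h>

    static int parse_optional_dma_cfg(struct device *dev, struct device_node *np,
                                      struct plat_stmmacenet_data *plat)
    {
        struct stmmac_dma_cfg *dma_cfg;

        if (!of_find_property(np, "snps,pbl", NULL))
            return 0;       /* property absent: keep plat->dma_cfg NULL */

        dma_cfg = devm_kzalloc(dev, sizeof(*dma_cfg), GFP_KERNEL);
        if (!dma_cfg)
            return -ENOMEM;

        plat->dma_cfg = dma_cfg;
        of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
        dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
        dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
        return 0;
    }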
@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
{
	netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
	/* NAPI */
	netif_napi_add(netdev, napi,
		       gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
	netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
	netdev->ethtool_ops = &gelic_ether_ethtool_ops;
	netdev->netdev_ops = &gelic_netdevice_ops;
}

@ -37,7 +37,6 @@
#define GELIC_NET_RXBUF_ALIGN		128
#define GELIC_CARD_RX_CSUM_DEFAULT	1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT	5*HZ
#define GELIC_NET_NAPI_WEIGHT		(GELIC_NET_RX_DESCRIPTORS)
#define GELIC_NET_BROADCAST_ADDR	0xffffffffffffL

#define GELIC_NET_MC_COUNT_MAX		32 /* multicast address list */

@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
		printk(KERN_WARNING "Setting MDIO clock divisor to "
		       "default %d\n", DEFAULT_CLOCK_DIVISOR);
		clk_div = DEFAULT_CLOCK_DIVISOR;
		of_node_put(np1);
		goto issue;
	}

@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
	},
	/* HP hs2434 Mobile Broadband Module needs ZLPs */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
	},
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
	  .driver_info = (unsigned long)&cdc_mbim_info,
	},

@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)

	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
		zfcp_erp_action_dismiss(&port->erp_action);
	else
		shost_for_each_device(sdev, port->adapter->scsi_host)
	else {
		spin_lock(port->adapter->scsi_host->host_lock);
		__shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				zfcp_erp_action_dismiss_lun(sdev);
		spin_unlock(port->adapter->scsi_host->host_lock);
	}
}

static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)

@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, port->adapter->scsi_host)
	spin_lock(port->adapter->scsi_host->host_lock);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
	spin_unlock(port->adapter->scsi_host->host_lock);
}

static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)

@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
		atomic_set_mask(common_mask, &port->status);
	read_unlock_irqrestore(&adapter->port_list_lock, flags);

	shost_for_each_device(sdev, adapter->scsi_host)
	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, adapter->scsi_host)
		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}

/**

@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);

	shost_for_each_device(sdev, adapter->scsi_host) {
	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, adapter->scsi_host) {
		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
		if (clear_counter)
			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
	}
	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}

/**

@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
	struct scsi_device *sdev;
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	unsigned long flags;

	atomic_set_mask(mask, &port->status);

	if (!common_mask)
		return;

	shost_for_each_device(sdev, port->adapter->scsi_host)
	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port)
			atomic_set_mask(common_mask,
					&sdev_to_zfcp(sdev)->status);
	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}

/**

@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
	struct scsi_device *sdev;
	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
	unsigned long flags;

	atomic_clear_mask(mask, &port->status);

@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
	if (clear_counter)
		atomic_set(&port->erp_counter, 0);

	shost_for_each_device(sdev, port->adapter->scsi_host)
	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
	__shost_for_each_device(sdev, port->adapter->scsi_host)
		if (sdev_to_zfcp(sdev)->port == port) {
			atomic_clear_mask(common_mask,
					  &sdev_to_zfcp(sdev)->status);
			if (clear_counter)
				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
		}
	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}

/**

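Note: every zfcp hunk above swaps shost_for_each_device() for __shost_for_each_device() under host_lock. The plain iterator takes and drops a reference on each device as it walks, which is not safe in the atomic contexts these status helpers can run in; the double-underscore form walks the raw list and so must be bracketed by the host lock. A hedged sketch of the locked idiom (the handler body is a placeholder):

    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    static void walk_devices_locked(struct Scsi_Host *shost)
    {
        struct scsi_device *sdev;
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        __shost_for_each_device(sdev, shost) {
            /* per-device work that never sleeps goes here */
        }
        spin_unlock_irqrestore(shost->host_lock, flags);
    }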
@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,

static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
	spin_lock_irq(&qdio->req_q_lock);
	if (atomic_read(&qdio->req_q_free) ||
	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return 1;
	spin_unlock_irq(&qdio->req_q_lock);
	return 0;
}

@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	spin_unlock_irq(&qdio->req_q_lock);
	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	spin_lock_irq(&qdio->req_q_lock);
	return -EIO;
}

@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
		     zfcp_sysfs_##_feat##_##_name##_show, NULL);

#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value)		       \
static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
						   struct device_attribute *at,\
						   char *buf)		       \
{									       \
	return sprintf(buf, _format, _value);				       \
}									       \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
		     zfcp_sysfs_##_feat##_##_name##_show, NULL);

#define ZFCP_DEFINE_A_ATTR(_name, _format, _value)			     \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	     \
						 struct device_attribute *at,\

@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);

static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
					   struct device_attribute *attr,

@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
	&dev_attr_unit_in_recovery.attr,
	&dev_attr_unit_status.attr,
	&dev_attr_unit_access_denied.attr,
	&dev_attr_unit_access_shared.attr,
	&dev_attr_unit_access_readonly.attr,
	NULL
};
static struct attribute_group zfcp_unit_attr_group = {

@ -1353,7 +1353,6 @@ config SCSI_LPFC
	tristate "Emulex LightPulse Fibre Channel Support"
	depends on PCI && SCSI
	select SCSI_FC_ATTRS
	select GENERIC_CSUM
	select CRC_T10DIF
	help
          This lpfc driver supports the Emulex LightPulse

@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
		ret = comedi_device_postconfig(dev);
		if (ret < 0) {
			comedi_device_detach(dev);
			module_put(dev->driver->module);
			module_put(driv->module);
		}
		/* On success, the driver module count has been incremented. */
		return ret;

@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)

	pr_devel("HVSI@%x:   ... waiting handshake\n", pv->termno);

	/* Try for up to 200s */
	for (timeout = 0; timeout < 20; timeout++) {
	/* Try for up to 400ms */
	for (timeout = 0; timeout < 40; timeout++) {
		if (pv->established)
			goto established;
		if (!hvsi_get_packet(pv))

@ -304,6 +304,13 @@ static int __init ohci_pci_init(void)
	pr_info("%s: " DRIVER_DESC "\n", hcd_name);

	ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);

#ifdef	CONFIG_PM
	/* Entries for the PCI suspend/resume callbacks are special */
	ohci_pci_hc_driver.pci_suspend = ohci_suspend;
	ohci_pci_hc_driver.pci_resume = ohci_resume;
#endif

	return pci_register_driver(&ohci_pci_driver);
}
module_init(ohci_pci_init);

@ -15,7 +15,7 @@
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "otg_fsm.h"
#include "phy-fsm-usb.h"
#include <linux/usb/otg.h>
#include <linux/ioctl.h>

@ -29,7 +29,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>

#include "phy-otg-fsm.h"
#include "phy-fsm-usb.h"

/* Change USB protocol when there is a protocol change */
static int otg_set_protocol(struct otg_fsm *fsm, int protocol)

@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
	int block, off;

	inode = iget_locked(sb, ino);
	if (IS_ERR(inode))
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

fs/bio.c

@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;
	struct bio_vec *bvec;
	int ret = 0, i;

	if (!bio_flagged(bio, BIO_NULL_MAPPED))
		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
				     0, bmd->is_our_pages);
	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free.
		 */
		if (current->mm)
			ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
					     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
					     0, bmd->is_our_pages);
		else if (bmd->is_our_pages)
			bio_for_each_segment_all(bvec, bio, i)
				__free_page(bvec->bv_page);
	}
	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;

fs/dcache.c

@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head)
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	BUG_ON(dentry->d_lockref.count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

@ -467,7 +467,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	}

	if (ref)
		dentry->d_count--;
		dentry->d_lockref.count--;
	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.

@ -513,15 +513,10 @@ void dput(struct dentry *dentry)
		return;

repeat:
	if (dentry->d_count == 1)
	if (dentry->d_lockref.count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
	if (lockref_put_or_lock(&dentry->d_lockref))
		return;
	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))

@ -535,7 +530,7 @@ void dput(struct dentry *dentry)
	dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry)
	 * We also need to leave mountpoints alone,
	 * directory or not.
	 */
	if (dentry->d_count > 1 && dentry->d_inode) {
	if (dentry->d_lockref.count > 1 && dentry->d_inode) {
		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;

@ -606,14 +601,12 @@ EXPORT_SYMBOL(d_invalidate);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)

@ -634,8 +627,8 @@ struct dentry *dget_parent(struct dentry *dentry)
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_count);
	ret->d_count++;
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}

@ -718,7 +711,7 @@ void d_prune_aliases(struct inode *inode)
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
		if (!dentry->d_lockref.count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);

@ -763,12 +756,8 @@ static void try_prune_one_dentry(struct dentry *dentry)
	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
		if (lockref_put_or_lock(&dentry->d_lockref))
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

@ -793,7 +782,7 @@ static void shrink_dentry_list(struct list_head *list)
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
		if (dentry->d_lockref.count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;

@ -913,7 +902,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
			dentry_lru_del(dentry);
			__d_shrink(dentry);

			if (dentry->d_count != 0) {
			if (dentry->d_lockref.count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"

@ -922,7 +911,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_count,
				       dentry->d_lockref.count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();

@ -933,7 +922,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				parent->d_count--;
				parent->d_lockref.count--;
				list_del(&dentry->d_u.d_child);
			}

@ -981,7 +970,7 @@ void shrink_dcache_for_umount(struct super_block *sb)

	dentry = sb->s_root;
	sb->s_root = NULL;
	dentry->d_count--;
	dentry->d_lockref.count--;
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {

@ -1147,7 +1136,7 @@ static int select_parent(struct dentry *parent, struct list_head *dispose)
		 * loop in shrink_dcache_parent() might not make any progress
		 * and loop forever.
		 */
		if (dentry->d_count) {
		if (dentry->d_lockref.count) {
			dentry_lru_del(dentry);
		} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
			dentry_lru_move_list(dentry, dispose);

@ -1269,7 +1258,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_count = 1;
	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);

@ -1970,7 +1959,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
				goto next;
		}

		dentry->d_count++;
		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;

@ -2069,7 +2058,7 @@ void d_delete(struct dentry * dentry)
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_count == 1) {
	if (dentry->d_lockref.count == 1) {
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();

@ -2724,6 +2713,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
	return memcpy(buffer, temp, sz);
}

char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* these dentries are never renamed, so d_lock is not needed */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend_name(&end, &buflen, &dentry->d_name) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}

/*
 * Write full pathname from the root of the filesystem into the buffer.
 */

@ -2937,7 +2937,7 @@ void d_genocide(struct dentry *root)
			}
			if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
				dentry->d_flags |= DCACHE_GENOCIDE;
				dentry->d_count--;
				dentry->d_lockref.count--;
			}
			spin_unlock(&dentry->d_lock);
		}

@ -2945,7 +2945,7 @@ void d_genocide(struct dentry *root)
		struct dentry *child = this_parent;
		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
			this_parent->d_flags |= DCACHE_GENOCIDE;
			this_parent->d_count--;
			this_parent->d_lockref.count--;
		}
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)

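Note: the dput() rework above shows the general shape of a lockref conversion: an open-coded "lock; if count > 1, decrement and unlock" fast path collapses into lockref_put_or_lock(). A hedged before/after sketch of that pattern; obj and teardown_locked() are invented for illustration:

    #include <linux/lockref.h>

    struct obj {
        struct lockref ref;
        /* ... payload ... */
    };

    static void teardown_locked(struct obj *o);   /* hypothetical: frees with lock held */

    /* Before: open-coded fast path under the spinlock. */
    static void obj_put_old(struct obj *o)
    {
        spin_lock(&o->ref.lock);
        if (o->ref.count > 1) {
            o->ref.count--;
            spin_unlock(&o->ref.lock);
            return;
        }
        teardown_locked(o);    /* last reference */
    }

    /* After: same semantics, but an implementation may satisfy the
     * common case with a cmpxchg over lock+count and never take the lock.
     */
    static void obj_put_new(struct obj *o)
    {
        if (lockref_put_or_lock(&o->ref))
            return;            /* count was > 1 and has been decremented */
        /* returned with the lock held and count <= 1 */
        teardown_locked(o);
    }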
@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
	struct inode *inode;

	inode = iget_locked(super, ino);
	if (IS_ERR(inode))
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

@ -926,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
	return h - hstates;
}

static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
				dentry->d_name.name);
}

static struct dentry_operations anon_ops = {
	.d_dname = hugetlb_dname
	.d_dname = simple_dname
};

/*

@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)

		dir_index = (u32) ctx->pos;

		/*
		 * NFSv4 reserves cookies 1 and 2 for . and .. so the value
		 * we return to the vfs is one greater than the one we use
		 * internally.
		 */
		if (dir_index)
			dir_index--;

		if (dir_index > 1) {
			struct dir_table_slot dirtab_slot;

@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
			if (p->header.flag & BT_INTERNAL) {
				jfs_err("jfs_readdir: bad index table");
				DT_PUTPAGE(mp);
				ctx->pos = -1;
				ctx->pos = DIREND;
				return 0;
			}
		} else {

@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
			/*
			 * self "."
			 */
			ctx->pos = 0;
			ctx->pos = 1;
			if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
				return 0;
		}
		/*
		 * parent ".."
		 */
		ctx->pos = 1;
		ctx->pos = 2;
		if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
			return 0;

@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
		/*
		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
		 *
		 * pn = index = 0:	First entry "."
		 * pn = 0; index = 1:	Second entry ".."
		 * pn = 0; index = 1:	First entry "."
		 * pn = 0; index = 2:	Second entry ".."
		 * pn > 0:		Real entries, pn=1 -> leftmost page
		 * pn = index = -1:	No more entries
		 */
		dtpos = ctx->pos;
		if (dtpos == 0) {
		if (dtpos < 2) {
			/* build "." entry */
			ctx->pos = 1;
			if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
				return 0;
			dtoffset->index = 1;
			dtoffset->index = 2;
			ctx->pos = dtpos;
		}

		if (dtoffset->pn == 0) {
			if (dtoffset->index == 1) {
			if (dtoffset->index == 2) {
				/* build ".." entry */
				if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
					return 0;

@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
					}
					jfs_dirent->position = unique_pos++;
				}
				/*
				 * We add 1 to the index because we may
				 * use a value of 2 internally, and NFSv4
				 * doesn't like that.
				 */
				jfs_dirent->position++;
			} else {
				jfs_dirent->position = dtpos;
				len = min(d_namleft, DTLHDRDATALEN_LEGACY);

fs/namei.c

@ -536,8 +536,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
		 * a reference at this point.
		 */
		BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
		BUG_ON(!parent->d_count);
		parent->d_count++;
		BUG_ON(!parent->d_lockref.count);
		parent->d_lockref.count++;
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&parent->d_lock);

@ -3327,7 +3327,7 @@ void dentry_unhash(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	spin_lock(&dentry->d_lock);
	if (dentry->d_count == 1)
	if (dentry->d_lockref.count == 1)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}

@ -3671,11 +3671,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;
	/*
	 * Using empty names is equivalent to using AT_SYMLINK_FOLLOW
	 * on /proc/self/fd/<fd>.
	 * To use null names we require CAP_DAC_READ_SEARCH
	 * This ensures that not everyone will be able to create
	 * handlink using the passed filedescriptor.
	 */
	if (flags & AT_EMPTY_PATH)
	if (flags & AT_EMPTY_PATH) {
		if (!capable(CAP_DAC_READ_SEARCH))
			return -ENOENT;
		how = LOOKUP_EMPTY;
	}

	if (flags & AT_SYMLINK_FOLLOW)
		how |= LOOKUP_FOLLOW;

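Note: the linkat() hunk gates the empty-name form behind CAP_DAC_READ_SEARCH. A small userspace sketch of the call this affects; the paths are illustrative:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/tmp/source", O_RDONLY);

        if (fd < 0)
            return 1;
        /* Link the object behind fd itself (empty oldpath).  With the
         * hunk above this succeeds only for a caller holding
         * CAP_DAC_READ_SEARCH; otherwise it fails with ENOENT.
         */
        if (linkat(fd, "", AT_FDCWD, "/tmp/new-link", AT_EMPTY_PATH) < 0)
            perror("linkat");
        close(fd);
        return 0;
    }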
@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
				 CL_COPY_ALL | CL_PRIVATE);
	namespace_unlock();
	if (IS_ERR(tree))
		return NULL;
		return ERR_CAST(tree);
	return &tree->mnt;
}

@ -1022,7 +1022,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
	struct inode *inode = NULL;
	struct ocfs2_super *osb = NULL;
	struct buffer_head *bh = NULL;
	char nodestr[8];
	char nodestr[12];
	struct ocfs2_blockcheck_stats stats;

	trace_ocfs2_fill_super(sb, data, silent);

@ -228,8 +228,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
	if (!p)
		return -ENOENT;

		if (!dir_emit_dots(file, ctx))
			goto out;
	if (!dir_emit_dots(file, ctx))
		goto out;
	files = get_files_struct(p);

@ -9,6 +9,7 @@
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/lockref.h>

struct nameidata;
struct path;

@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
# endif
#endif

#define d_lock	d_lockref.lock

struct dentry {
	/* RCU lookup touched fields */
	unsigned int d_flags;		/* protected by d_lock */

@ -112,8 +115,7 @@ struct dentry {
	unsigned char d_iname[DNAME_INLINE_LEN];	/* small names */

	/* Ref lookup also touches following */
	unsigned int d_count;		/* protected by d_lock */
	spinlock_t d_lock;		/* per dentry lock */
	struct lockref d_lockref;	/* per-dentry lock and refcount */
	const struct dentry_operations *d_op;
	struct super_block *d_sb;	/* The root of the dentry tree */
	unsigned long d_time;		/* used by d_revalidate */

@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
	assert_spin_locked(&dentry->d_lock);
	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
		ret = 1;
		dentry->d_count++;
		dentry->d_lockref.count++;
	}

	return ret;

@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)

static inline unsigned d_count(const struct dentry *dentry)
{
	return dentry->d_count;
	return dentry->d_lockref.count;
}

/* validate "insecure" dentry pointer */

@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
 * helper function for dentry_operations.d_dname() members
 */
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
extern char *simple_dname(struct dentry *, char *, int);

extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);

@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
	if (dentry)
		dentry->d_count++;
		dentry->d_lockref.count++;
	return dentry;
}

static inline struct dentry *dget(struct dentry *dentry)
{
	if (dentry) {
		spin_lock(&dentry->d_lock);
		dget_dlock(dentry);
		spin_unlock(&dentry->d_lock);
	}
	if (dentry)
		lockref_get(&dentry->d_lockref);
	return dentry;
}

include/linux/lockref.h (new file)

@ -0,0 +1,71 @@
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>

struct lockref {
	spinlock_t lock;
	unsigned int count;
};

/**
 * lockref_get - Increments reference count unconditionally
 * @lockcnt: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
static inline void lockref_get(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockcnt: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count is 0
 */
static inline int lockref_get_not_zero(struct lockref *lockref)
{
	int retval = 0;

	spin_lock(&lockref->lock);
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockcnt: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
static inline int lockref_put_or_lock(struct lockref *lockref)
{
	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}

#endif /* __LINUX_LOCKREF_H */

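Note: lockref_get_not_zero() above is meant for lookup paths, where an object found on a shared structure may be racing toward zero. A hedged sketch of that use; obj and find_in_cache() are invented for illustration:

    #include <linux/lockref.h>

    struct obj {
        struct lockref ref;
    };

    static struct obj *find_in_cache(void);   /* hypothetical unlocked lookup */

    static struct obj *obj_lookup(void)
    {
        struct obj *o = find_in_cache();

        /* Only pin the object if its count is still non-zero; a zero
         * count means a concurrent put is already freeing it.
         */
        if (o && !lockref_get_not_zero(&o->ref))
            o = NULL;   /* lost the race: treat as not found */
        return o;
    }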
@ -14,6 +14,10 @@ struct fs_struct;
 * A structure to contain pointers to all per-process
 * namespaces - fs (mount), uts, network, sysvipc, etc.
 *
 * The pid namespace is an exception -- it's accessed using
 * task_active_pid_ns.  The pid namespace here is the
 * namespace that children will use.
 *
 * 'count' is the number of tasks holding a reference.
 * The count for each namespace, then, will be the number
 * of nsproxies pointing to it, not the number of tasks.

@ -27,7 +31,7 @@ struct nsproxy {
	struct uts_namespace *uts_ns;
	struct ipc_namespace *ipc_ns;
	struct mnt_namespace *mnt_ns;
	struct pid_namespace *pid_ns;
	struct pid_namespace *pid_ns_for_children;
	struct net 	     *net_ns;
};
extern struct nsproxy init_nsproxy;

include/linux/regmap.h
@@ -16,6 +16,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include <linux/err.h>
 #include <linux/bug.h>
 
 struct module;
 struct device;
include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
 	__ret;								\
 })
 
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
+						    lock, ret)		\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		spin_unlock_irq(&lock);					\
+		ret = schedule_timeout(ret);				\
+		spin_lock_irq(&lock);					\
+		if (!ret)						\
+			break;						\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ *	The condition is checked under the lock. This is expected
+ *	to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ *	and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
+						  timeout)		\
+({									\
+	int __ret = timeout;						\
+									\
+	if (!(condition))						\
+		__wait_event_interruptible_lock_irq_timeout(		\
+					wq, condition, lock, __ret);	\
+	__ret;								\
+})
+
 
 /*
  * These are the old interfaces to sleep waiting for an event.
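
A sketch of how a caller would use the new macro, following the contract documented above (lock held on entry and on exit). `struct foo_dev` and its fields are hypothetical:

	/* Hypothetical driver helper: wait up to one second for the pending
	 * list to drain, dropping dev->lock only while asleep. */
	static int foo_drain(struct foo_dev *dev)
	{
		int ret;

		spin_lock_irq(&dev->lock);
		ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
						list_empty(&dev->pending),
						dev->lock, HZ);
		spin_unlock_irq(&dev->lock);

		if (ret == 0)
			return -ETIMEDOUT;	/* timeout elapsed, condition still false */
		if (ret < 0)
			return ret;		/* -ERESTARTSYS: interrupted by a signal */
		return 0;			/* condition true, 'ret' jiffies remained */
	}

Note that the lock argument is the spinlock itself, not a pointer: the macro takes its address internally with spin_unlock_irq(&lock).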
include/net/busy_poll.h
@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 		/* local bh are disabled so it is ok to use _BH */
 		NET_ADD_STATS_BH(sock_net(sk),
 				 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		cpu_relax();
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
 		 !need_resched() && !busy_loop_timeout(end_time));
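
cpu_relax() is the standard courtesy call in any tight poll loop: it hints the CPU (e.g. the x86 'pause' instruction) to save power and reduce memory-order contention with a sibling hyperthread. A generic sketch of the same pattern outside networking; the register layout and timeout here are made up for illustration:

	/* Hypothetical MMIO poll: spin briefly for a READY bit. */
	static int foo_wait_ready(void __iomem *status_reg)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(10);

		while (!(readl(status_reg) & BIT(0))) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			cpu_relax();	/* be polite while spinning */
		}
		return 0;
	}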
include/net/genetlink.h
@@ -61,6 +61,7 @@ struct genl_family {
 	struct list_head	ops_list;	/* private */
 	struct list_head	family_list;	/* private */
 	struct list_head	mcast_groups;	/* private */
+	struct module		*module;
 };
 
 /**
@@ -121,9 +122,24 @@ struct genl_ops {
 	struct list_head	ops_list;
 };
 
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
-	struct genl_ops *ops, size_t n_ops);
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
 extern int genl_unregister_family(struct genl_family *family);
 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
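
The reason these became static inlines rather than plain externs: THIS_MODULE is resolved in the compilation unit where the macro expands, so the wrapper records the *registering* module in family->module, not the genetlink core itself. A hedged sketch of a registering module (the family name and module boilerplate are invented for illustration):

	/* Hypothetical generic-netlink family registration. */
	static struct genl_family hypothetical_family = {
		.id	 = GENL_ID_GENERATE,
		.name	 = "hypothetical",
		.version = 1,
		.maxattr = 0,
	};

	static int __init hypothetical_init(void)
	{
		/* Expands in *this* module, so family->module points here;
		 * the genetlink core can then pin this module while a dump
		 * callback is still in flight. */
		return genl_register_family(&hypothetical_family);
	}

	static void __exit hypothetical_exit(void)
	{
		genl_unregister_family(&hypothetical_family);
	}

	module_init(hypothetical_init);
	module_exit(hypothetical_exit);
	MODULE_LICENSE("GPL");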
include/net/route.h
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
 	return hoplimit;
 }
 
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
 #endif /* _ROUTE_H */
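
The helper encodes one policy decision: a socket in IP_PMTUDISC_PROBE mode does its own path-MTU probing, so the kernel should size packets against the device MTU rather than the (possibly smaller) cached route MTU. A sketch of the caller-side pattern, in the spirit of ip_finish_output(); foo_xmit() and the output callback are hypothetical:

	static int foo_xmit(struct sk_buff *skb, int (*output)(struct sk_buff *))
	{
		/* Fragment only when the packet exceeds the MTU the socket's
		 * PMTU-discovery mode says is relevant. */
		if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
			return ip_fragment(skb, output);
		return output(skb);
	}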
include/net/xfrm.h
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
 						struct sk_buff *skb);
 	int			(*transport_finish)(struct sk_buff *skb,
 						    int async);
+	void			(*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
 extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
 extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 extern int xfrm_output_resume(struct sk_buff *skb, int err);
 extern int xfrm_output(struct sk_buff *skb);
 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
 extern int xfrm4_extract_header(struct sk_buff *skb);
 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 extern int xfrm6_extract_header(struct sk_buff *skb);
 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
 extern int xfrm6_output_finish(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 				 u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 
 #ifdef CONFIG_XFRM
 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
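
Taken together, these additions give IPsec tunnel code one family-agnostic entry point for reporting "packet too big" back to the sending socket. A sketch of how the dispatcher plausibly ties the pieces together; this is close to, but not guaranteed identical to, the real net/xfrm implementation:

	/* Sketch: route a local MTU error for skb to the address family of
	 * the inner packet via the new afinfo hook. */
	void xfrm_local_error(struct sk_buff *skb, int mtu)
	{
		unsigned int proto;
		struct xfrm_state_afinfo *afinfo;

		if (skb->protocol == htons(ETH_P_IP))
			proto = AF_INET;
		else if (skb->protocol == htons(ETH_P_IPV6))
			proto = AF_INET6;
		else
			return;

		afinfo = xfrm_state_get_afinfo(proto);
		if (!afinfo)
			return;

		/* e.g. xfrm4_local_error() emits ICMP_FRAG_NEEDED to the sender */
		afinfo->local_error(skb, mtu);
		xfrm_state_put_afinfo(afinfo);
	}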
include/uapi/linux/cm4000_cs.h
@@ -2,6 +2,7 @@
 #define _UAPI_CM4000_H_
 
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 #define MAX_ATR			33
include/uapi/linux/icmpv6.h
@@ -115,6 +115,8 @@ struct icmp6hdr {
 #define ICMPV6_NOT_NEIGHBOUR		2
 #define ICMPV6_ADDR_UNREACH		3
 #define ICMPV6_PORT_UNREACH		4
+#define ICMPV6_POLICY_FAIL		5
+#define ICMPV6_REJECT_ROUTE		6
 
 /*
  *	Codes for Time Exceeded
ipc/msg.c
@@ -839,7 +839,7 @@ static inline void free_copy(struct msg_msg *copy)
 
 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 {
-	struct msg_msg *msg;
+	struct msg_msg *msg, *found = NULL;
 	long count = 0;
 
 	list_for_each_entry(msg, &msq->q_messages, m_list) {
@@ -848,6 +848,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 					       *msgtyp, mode)) {
 			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
 				*msgtyp = msg->m_type - 1;
+				found = msg;
 			} else if (mode == SEARCH_NUMBER) {
 				if (*msgtyp == count)
 					return msg;
@@ -857,7 +858,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 		}
 	}
 
-	return ERR_PTR(-EAGAIN);
+	return found ?: ERR_PTR(-EAGAIN);
 }
 
 long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
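
The SEARCH_LESSEQUAL path serves msgrcv() with a negative msgtyp, which must return the queued message with the lowest type <= |msgtyp|. The old loop tightened *msgtyp on each candidate but never remembered the candidate itself, so unless a type-1 message eventually matched, the scan fell through to -EAGAIN; 'found' keeps the best candidate. A userspace demonstration of the semantics the fix restores (standard SysV message-queue API, error checks omitted for brevity):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ipc.h>
	#include <sys/msg.h>

	struct mbuf { long mtype; char mtext[64]; };

	int main(void)
	{
		int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
		struct mbuf m;

		m.mtype = 3; strcpy(m.mtext, "three"); msgsnd(id, &m, 64, 0);
		m.mtype = 2; strcpy(m.mtext, "two");   msgsnd(id, &m, 64, 0);

		/* Negative type: fetch the lowest type <= 3 -- must be the
		 * type-2 message even though type 3 was queued first. */
		msgrcv(id, &m, 64, -3, 0);
		printf("%ld: %s\n", m.mtype, m.mtext);	/* prints "2: two" */

		msgctl(id, IPC_RMID, NULL);
		return 0;
	}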
kernel/cgroup.c
@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	struct dentry *d = cgrp->dentry;
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	struct cgroup *child;
 	bool empty;
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
@@ -4490,11 +4491,27 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	 * @cgrp from being removed while __put_css_set() is in progress.
 	 */
 	read_lock(&css_set_lock);
-	empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
+	empty = list_empty(&cgrp->cset_links);
 	read_unlock(&css_set_lock);
 	if (!empty)
 		return -EBUSY;
 
+	/*
+	 * Make sure there's no live children.  We can't test ->children
+	 * emptiness as dead children linger on it while being destroyed;
+	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
+	 */
+	empty = true;
+	rcu_read_lock();
+	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
+		empty = cgroup_is_dead(child);
+		if (!empty)
+			break;
+	}
+	rcu_read_unlock();
+	if (!empty)
+		return -EBUSY;
+
 	/*
 	 * Block new css_tryget() by killing css refcnts. cgroup core
 	 * guarantees that, by the time ->css_offline() is invoked, no new
|
|||
|
||||
/*
|
||||
* Cpusets with tasks - existing or newly being attached - can't
|
||||
* have empty cpus_allowed or mems_allowed.
|
||||
* be changed to have empty cpus_allowed or mems_allowed.
|
||||
*/
|
||||
ret = -ENOSPC;
|
||||
if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
|
||||
(cpumask_empty(trial->cpus_allowed) &&
|
||||
nodes_empty(trial->mems_allowed)))
|
||||
goto out;
|
||||
if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
|
||||
if (!cpumask_empty(cur->cpus_allowed) &&
|
||||
cpumask_empty(trial->cpus_allowed))
|
||||
goto out;
|
||||
if (!nodes_empty(cur->mems_allowed) &&
|
||||
nodes_empty(trial->mems_allowed))
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
|
|
|
kernel/fork.c
@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * don't allow the creation of threads.
 	 */
 	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-	    (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+	    (task_active_pid_ns(current) !=
+	     current->nsproxy->pid_ns_for_children))
 		return ERR_PTR(-EINVAL);
 
 	retval = security_task_create(clone_flags);
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	if (pid != &init_struct_pid) {
 		retval = -ENOMEM;
-		pid = alloc_pid(p->nsproxy->pid_ns);
+		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 		if (!pid)
 			goto bad_fork_cleanup_io;
 	}
kernel/nsproxy.c
@@ -29,15 +29,15 @@
 static struct kmem_cache *nsproxy_cachep;
 
 struct nsproxy init_nsproxy = {
-	.count	= ATOMIC_INIT(1),
-	.uts_ns	= &init_uts_ns,
+	.count			= ATOMIC_INIT(1),
+	.uts_ns			= &init_uts_ns,
 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
-	.ipc_ns	= &init_ipc_ns,
+	.ipc_ns			= &init_ipc_ns,
 #endif
-	.mnt_ns	= NULL,
-	.pid_ns	= &init_pid_ns,
+	.mnt_ns			= NULL,
+	.pid_ns_for_children	= &init_pid_ns,
 #ifdef CONFIG_NET
-	.net_ns	= &init_net,
+	.net_ns			= &init_net,
 #endif
 };
 
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 		goto out_ipc;
 	}
 
-	new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns);
-	if (IS_ERR(new_nsp->pid_ns)) {
-		err = PTR_ERR(new_nsp->pid_ns);
+	new_nsp->pid_ns_for_children =
+		copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
+	if (IS_ERR(new_nsp->pid_ns_for_children)) {
+		err = PTR_ERR(new_nsp->pid_ns_for_children);
 		goto out_pid;
 	}
 
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 	return new_nsp;
 
 out_net:
-	if (new_nsp->pid_ns)
-		put_pid_ns(new_nsp->pid_ns);
+	if (new_nsp->pid_ns_for_children)
+		put_pid_ns(new_nsp->pid_ns_for_children);
 out_pid:
 	if (new_nsp->ipc_ns)
 		put_ipc_ns(new_nsp->ipc_ns);
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
 		put_uts_ns(ns->uts_ns);
 	if (ns->ipc_ns)
 		put_ipc_ns(ns->ipc_ns);
-	if (ns->pid_ns)
-		put_pid_ns(ns->pid_ns);
+	if (ns->pid_ns_for_children)
+		put_pid_ns(ns->pid_ns_for_children);
 	put_net(ns->net_ns);
 	kmem_cache_free(nsproxy_cachep, ns);
 }
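
The rename makes the semantics explicit: the namespace stored in nsproxy only governs where *children* are created, while the caller's own active namespace comes from task_active_pid_ns(). That is exactly what unshare(CLONE_NEWPID) relies on, as this userspace sketch shows (standard syscalls; requires CAP_SYS_ADMIN):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		if (unshare(CLONE_NEWPID) < 0) {
			perror("unshare");
			return 1;
		}
		/* The caller's active pid namespace is unchanged; only
		 * nsproxy->pid_ns_for_children was replaced. */
		pid_t child = fork();
		if (child == 0) {
			/* First child in the new namespace: pid 1 inside it. */
			printf("child sees itself as pid %d\n", (int)getpid());
			return 0;
		}
		waitpid(child, NULL, 0);
		return 0;
	}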
(Some files were not shown because too many files have changed in this diff.)