Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (24 commits)
  [NETFILTER]: xt_connlimit needs to depend on nf_conntrack
  [NETFILTER]: ipt_iprange.h must #include <linux/types.h>
  [IrDA]: Fix IrDA build failure
  [ATM]: nicstar needs virt_to_bus
  [NET]: move __dev_addr_discard adjacent to dev_addr_discard for readability
  [NET]: merge dev_unicast_discard and dev_mc_discard into one
  [NET]: move dev_mc_discard from dev_mcast.c to dev.c
  [NETLINK]: negative groups in netlink_setsockopt
  [PPPOL2TP]: Reset meta-data in xmit function
  [PPPOL2TP]: Fix use-after-free
  [PKT_SCHED]: Some typo fixes in net/sched/Kconfig
  [XFRM]: Fix crash introduced by struct dst_entry reordering
  [TCP]: remove unused argument to cong_avoid op
  [ATM]: [idt77252] Rename CONFIG_ATM_IDT77252_SEND_IDLE to not resemble a Kconfig variable
  [ATM]: [drivers] ioremap balanced with iounmap
  [ATM]: [lanai] sram_test_word() must be __devinit
  [ATM]: [nicstar] Replace C code with call to ARRAY_SIZE() macro.
  [ATM]: Eliminate dead config variable CONFIG_BR2684_FAST_TRANS.
  [ATM]: Replacing kmalloc/memset combination with kzalloc.
  [NET]: gen_estimator deadlock fix
  ...
commit 485cf925d8
43 changed files with 232 additions and 173 deletions
@@ -172,7 +172,7 @@ config ATM_ZATM_DEBUG
 config ATM_NICSTAR
 tristate "IDT 77201 (NICStAR) (ForeRunnerLE)"
-depends on PCI && !64BIT
+depends on PCI && !64BIT && VIRT_TO_BUS
 help
 The NICStAR chipset family is used in a large number of ATM NICs for
 25 and for 155 Mbps, including IDT cards and the Fore ForeRunnerLE
@@ -1738,7 +1738,8 @@ static int __devinit eni_do_init(struct atm_dev *dev)
 printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
 "magic - expected 0x%x, got 0x%x\n",dev->number,
 ENI155_MAGIC,(unsigned) readl(&eprom->magic));
-return -EINVAL;
+error = -EINVAL;
+goto unmap;
 }
 }
 eni_dev->phy = base+PHY_BASE;
@@ -1765,17 +1766,27 @@ static int __devinit eni_do_init(struct atm_dev *dev)
 printk(")\n");
 printk(KERN_ERR DEV_LABEL "(itf %d): ERROR - wrong id 0x%x\n",
 dev->number,(unsigned) eni_in(MID_RES_ID_MCON));
-return -EINVAL;
+error = -EINVAL;
+goto unmap;
 }
 error = eni_dev->asic ? get_esi_asic(dev) : get_esi_fpga(dev,base);
-if (error) return error;
+if (error)
+goto unmap;
 for (i = 0; i < ESI_LEN; i++)
 printk("%s%02X",i ? "-" : "",dev->esi[i]);
 printk(")\n");
 printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number,
 eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA",
 media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]);
-return suni_init(dev);
+
+error = suni_init(dev);
+if (error)
+goto unmap;
+out:
+return error;
+unmap:
+iounmap(base);
+goto out;
 }
@@ -1710,7 +1710,7 @@ static int __devinit fs_init (struct fs_dev *dev)
 /* This bit is documented as "RESERVED" */
 if (isr & ISR_INIT_ERR) {
 printk (KERN_ERR "Error initializing the FS... \n");
-return 1;
+goto unmap;
 }
 if (isr & ISR_INIT) {
 fs_dprintk (FS_DEBUG_INIT, "Ha! Initialized OK!\n");
@@ -1723,7 +1723,7 @@ static int __devinit fs_init (struct fs_dev *dev)
 if (!to) {
 printk (KERN_ERR "timeout initializing the FS... \n");
-return 1;
+goto unmap;
 }

 /* XXX fix for fs155 */
@@ -1803,7 +1803,7 @@ static int __devinit fs_init (struct fs_dev *dev)
 if (!dev->atm_vccs) {
 printk (KERN_WARNING "Couldn't allocate memory for VCC buffers. Woops!\n");
 /* XXX Clean up..... */
-return 1;
+goto unmap;
 }

 dev->tx_inuse = kzalloc (dev->nchannels / 8 /* bits/byte */ , GFP_KERNEL);
@@ -1813,7 +1813,7 @@ static int __devinit fs_init (struct fs_dev *dev)
 if (!dev->tx_inuse) {
 printk (KERN_WARNING "Couldn't allocate memory for tx_inuse bits!\n");
 /* XXX Clean up..... */
-return 1;
+goto unmap;
 }
 /* -- RAS1 : FS155 and 50 differ. Default (0) should be OK for both */
 /* -- RAS2 : FS50 only: Default is OK. */
@@ -1840,7 +1840,7 @@ static int __devinit fs_init (struct fs_dev *dev)
 if (request_irq (dev->irq, fs_irq, IRQF_SHARED, "firestream", dev)) {
 printk (KERN_WARNING "couldn't get irq %d for firestream.\n", pci_dev->irq);
 /* XXX undo all previous stuff... */
-return 1;
+goto unmap;
 }
 fs_dprintk (FS_DEBUG_INIT, "Grabbed irq %d for dev at %p.\n", dev->irq, dev);

@@ -1890,6 +1890,9 @@ static int __devinit fs_init (struct fs_dev *dev)
 func_exit ();
 return 0;
+unmap:
+iounmap(dev->base);
+return 1;
 }

 static int __devinit firestream_init_one (struct pci_dev *pci_dev,
@@ -2012,6 +2015,7 @@ static void __devexit firestream_remove_one (struct pci_dev *pdev)
 for (i=0;i < FS_NR_RX_QUEUES;i++)
 free_queue (dev, &dev->rx_rq[i]);

+iounmap(dev->base);
 fs_dprintk (FS_DEBUG_ALLOC, "Free fs-dev: %p\n", dev);
 nxtdev = dev->next;
 kfree (dev);
@@ -65,7 +65,7 @@ static char const rcsid[] =
 static unsigned int vpibits = 1;

-#define CONFIG_ATM_IDT77252_SEND_IDLE 1
+#define ATM_IDT77252_SEND_IDLE 1

 /*
@@ -3404,7 +3404,7 @@ init_card(struct atm_dev *dev)
 conf = SAR_CFG_TX_FIFO_SIZE_9 | /* Use maximum fifo size */
 SAR_CFG_RXSTQ_SIZE_8k | /* Receive Status Queue is 8k */
 SAR_CFG_IDLE_CLP | /* Set CLP on idle cells */
-#ifndef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifndef ATM_IDT77252_SEND_IDLE
 SAR_CFG_NO_IDLE | /* Do not send idle cells */
 #endif
 0;
@@ -3541,7 +3541,7 @@ init_card(struct atm_dev *dev)
 printk("%s: Linkrate on ATM line : %u bit/s, %u cell/s.\n",
 card->name, linkrate, card->link_pcr);

-#ifdef CONFIG_ATM_IDT77252_SEND_IDLE
+#ifdef ATM_IDT77252_SEND_IDLE
 card->utopia_pcr = card->link_pcr;
 #else
 card->utopia_pcr = (160000000 / 8 / 54);
@@ -552,8 +552,8 @@ static inline void sram_write(const struct lanai_dev *lanai,
 writel(val, sram_addr(lanai, offset));
 }

-static int __init sram_test_word(
-const struct lanai_dev *lanai, int offset, u32 pattern)
+static int __devinit sram_test_word(const struct lanai_dev *lanai,
+int offset, u32 pattern)
 {
 u32 readback;
 sram_write(lanai, pattern, offset);
@@ -134,7 +134,7 @@ nicstar_read_eprom_status( virt_addr_t base )
 /* Send read instruction */
 val = NICSTAR_REG_READ( base, NICSTAR_REG_GENERAL_PURPOSE ) & 0xFFFFFFF0;

-for (i=0; i<sizeof rdsrtab/sizeof rdsrtab[0]; i++)
+for (i=0; i<ARRAY_SIZE(rdsrtab); i++)
 {
 NICSTAR_REG_WRITE( base, NICSTAR_REG_GENERAL_PURPOSE,
 (val | rdsrtab[i]) );
@@ -54,8 +54,8 @@

 #define DRV_MODULE_NAME "bnx2"
 #define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.6.2"
-#define DRV_MODULE_RELDATE "July 6, 2007"
+#define DRV_MODULE_VERSION "1.6.3"
+#define DRV_MODULE_RELDATE "July 16, 2007"

 #define RUN_AT(x) (jiffies + (x))
@@ -126,91 +126,102 @@ static struct pci_device_id bnx2_pci_tbl[] = {

 static struct flash_spec flash_table[] =
 {
+#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
 /* Slow EEPROM */
 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
-1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 "EEPROM - slow"},
 /* Expansion entry 0001 */
 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 "Entry 0001"},
 /* Saifun SA25F010 (non-buffered flash) */
 /* strap, cfg1, & write1 need updates */
 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
 "Non-buffered flash (128kB)"},
 /* Saifun SA25F020 (non-buffered flash) */
 /* strap, cfg1, & write1 need updates */
 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
 "Non-buffered flash (256kB)"},
 /* Expansion entry 0100 */
 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 "Entry 0100"},
 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
-0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
-0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
 /* Saifun SA25F005 (non-buffered flash) */
 /* strap, cfg1, & write1 need updates */
 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
 "Non-buffered flash (64kB)"},
 /* Fast EEPROM */
 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
-1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
 "EEPROM - fast"},
 /* Expansion entry 1001 */
 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 "Entry 1001"},
 /* Expansion entry 1010 */
 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 "Entry 1010"},
 /* ATMEL AT45DB011B (buffered flash) */
 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
-1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
 "Buffered flash (128kB)"},
 /* Expansion entry 1100 */
 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 "Entry 1100"},
 /* Expansion entry 1101 */
 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
-0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
 "Entry 1101"},
 /* Ateml Expansion entry 1110 */
 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
-1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
 "Entry 1110 (Atmel)"},
 /* ATMEL AT45DB021B (buffered flash) */
 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
-1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
 "Buffered flash (256kB)"},
 };

+static struct flash_spec flash_5709 = {
+.flags = BNX2_NV_BUFFERED,
+.page_bits = BCM5709_FLASH_PAGE_BITS,
+.page_size = BCM5709_FLASH_PAGE_SIZE,
+.addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
+.total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
+.name = "5709 Buffered flash (256kB)",
+};
+
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
@@ -3289,7 +3300,7 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
 val = REG_RD(bp, BNX2_MISC_CFG);
 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

-if (!bp->flash_info->buffered) {
+if (bp->flash_info->flags & BNX2_NV_WREN) {
 int j;

 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
@@ -3349,7 +3360,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
 u32 cmd;
 int j;

-if (bp->flash_info->buffered)
+if (bp->flash_info->flags & BNX2_NV_BUFFERED)
 /* Buffered flash, no erase needed */
 return 0;

@@ -3392,8 +3403,8 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
 /* Build the command word. */
 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

-/* Calculate an offset of a buffered flash. */
-if (bp->flash_info->buffered) {
+/* Calculate an offset of a buffered flash, not needed for 5709. */
+if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
 offset = ((offset / bp->flash_info->page_size) <<
 bp->flash_info->page_bits) +
 (offset % bp->flash_info->page_size);
@@ -3439,8 +3450,8 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
 /* Build the command word. */
 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

-/* Calculate an offset of a buffered flash. */
-if (bp->flash_info->buffered) {
+/* Calculate an offset of a buffered flash, not needed for 5709. */
+if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
 offset = ((offset / bp->flash_info->page_size) <<
 bp->flash_info->page_bits) +
 (offset % bp->flash_info->page_size);
@@ -3478,15 +3489,19 @@ static int
 bnx2_init_nvram(struct bnx2 *bp)
 {
 u32 val;
-int j, entry_count, rc;
+int j, entry_count, rc = 0;
 struct flash_spec *flash;

+if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+bp->flash_info = &flash_5709;
+goto get_flash_size;
+}
+
 /* Determine the selected interface. */
 val = REG_RD(bp, BNX2_NVM_CFG1);

 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

-rc = 0;
 if (val & 0x40000000) {

 /* Flash interface has been reconfigured */
@@ -3542,6 +3557,7 @@ bnx2_init_nvram(struct bnx2 *bp)
 return -ENODEV;
 }

+get_flash_size:
 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
 if (val)
@@ -3706,7 +3722,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 buf = align_buf;
 }

-if (bp->flash_info->buffered == 0) {
+if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 flash_buffer = kmalloc(264, GFP_KERNEL);
 if (flash_buffer == NULL) {
 rc = -ENOMEM;
@@ -3739,7 +3755,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 bnx2_enable_nvram_access(bp);

 cmd_flags = BNX2_NVM_COMMAND_FIRST;
-if (bp->flash_info->buffered == 0) {
+if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 int j;

 /* Read the whole page into the buffer
@@ -3767,7 +3783,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 /* Loop to write back the buffer data from page_start to
 * data_start */
 i = 0;
-if (bp->flash_info->buffered == 0) {
+if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 /* Erase the page */
 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
 goto nvram_write_end;
@@ -3791,7 +3807,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
 /* Loop to write the new data from data_start to data_end */
 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
 if ((addr == page_end - 4) ||
-((bp->flash_info->buffered) &&
+((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
 (addr == data_end - 4))) {

 cmd_flags |= BNX2_NVM_COMMAND_LAST;
@@ -3808,7 +3824,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,

 /* Loop to write back the buffer data from data_end
 * to page_end */
-if (bp->flash_info->buffered == 0) {
+if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
 for (addr = data_end; addr < page_end;
 addr += 4, i += 4) {

@@ -4107,7 +4123,7 @@ bnx2_init_chip(struct bnx2 *bp)
 if (CHIP_NUM(bp) == CHIP_NUM_5708)
 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
 else
-REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
+REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */

 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
@@ -4127,10 +4143,6 @@ bnx2_init_chip(struct bnx2 *bp)

 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

-if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
-BNX2_PORT_FEATURE_ASF_ENABLED)
-bp->flags |= ASF_ENABLE_FLAG;
-
 /* Initialize the receive filter. */
 bnx2_set_rx_mode(bp->dev);

@@ -5786,8 +5798,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
 bp->stats_ticks = USEC_PER_SEC;
 }
-if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
-bp->stats_ticks &= 0xffff00;
+if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

 if (netif_running(bp->dev)) {
 bnx2_netif_stop(bp);
@@ -6629,6 +6642,18 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 if (i != 2)
 bp->fw_version[j++] = '.';
 }
+if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
+BNX2_PORT_FEATURE_ASF_ENABLED) {
+bp->flags |= ASF_ENABLE_FLAG;
+
+for (i = 0; i < 30; i++) {
+reg = REG_RD_IND(bp, bp->shmem_base +
+BNX2_BC_STATE_CONDITION);
+if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+break;
+msleep(10);
+}
+}
 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
 reg &= BNX2_CONDITION_MFW_RUN_MASK;
 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
@@ -6672,7 +6697,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 bp->rx_ticks_int = 18;
 bp->rx_ticks = 18;

-bp->stats_ticks = 1000000 & 0xffff00;
+bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

 bp->timer_interval = HZ;
 bp->current_interval = HZ;
@@ -6433,6 +6433,11 @@ struct sw_bd {
 #define ST_MICRO_FLASH_PAGE_SIZE 256
 #define ST_MICRO_FLASH_BASE_TOTAL_SIZE 65536

+#define BCM5709_FLASH_PAGE_BITS 8
+#define BCM5709_FLASH_PHY_PAGE_SIZE (1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK (BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE 256
+
 #define NVRAM_TIMEOUT_COUNT 30000

@@ -6449,7 +6454,10 @@ struct flash_spec {
 u32 config2;
 u32 config3;
 u32 write1;
-u32 buffered;
+u32 flags;
+#define BNX2_NV_BUFFERED 0x00000001
+#define BNX2_NV_TRANSLATE 0x00000002
+#define BNX2_NV_WREN 0x00000004
 u32 page_bits;
 u32 page_size;
 u32 addr_mask;
@@ -824,6 +824,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 struct pppol2tp_session *session;
 struct pppol2tp_tunnel *tunnel;
 struct udphdr *uh;
+unsigned int len;

 error = -ENOTCONN;
 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -912,14 +913,15 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
 }

 /* Queue the packet to IP for output */
+len = skb->len;
 error = ip_queue_xmit(skb, 1);

 /* Update stats */
 if (error >= 0) {
 tunnel->stats.tx_packets++;
-tunnel->stats.tx_bytes += skb->len;
+tunnel->stats.tx_bytes += len;
 session->stats.tx_packets++;
-session->stats.tx_bytes += skb->len;
+session->stats.tx_bytes += len;
 } else {
 tunnel->stats.tx_errors++;
 session->stats.tx_errors++;
@@ -958,6 +960,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 __wsum csum = 0;
 struct sk_buff *skb2 = NULL;
 struct udphdr *uh;
+unsigned int len;

 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
 goto abort;
@@ -1046,18 +1049,25 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 printk("\n");
 }

+memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
+IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+IPSKB_REROUTED);
+nf_reset(skb2);
+
 /* Get routing info from the tunnel socket */
 dst_release(skb2->dst);
 skb2->dst = sk_dst_get(sk_tun);

 /* Queue the packet to IP for output */
+len = skb2->len;
 rc = ip_queue_xmit(skb2, 1);

 /* Update stats */
 if (rc >= 0) {
 tunnel->stats.tx_packets++;
-tunnel->stats.tx_bytes += skb2->len;
+tunnel->stats.tx_bytes += len;
 session->stats.tx_packets++;
-session->stats.tx_bytes += skb2->len;
+session->stats.tx_bytes += len;
 } else {
 tunnel->stats.tx_errors++;
 session->stats.tx_errors++;
@@ -1098,10 +1098,8 @@ extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all
 extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
 extern int dev_mc_sync(struct net_device *to, struct net_device *from);
 extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_discard(struct net_device *dev);
 extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
 extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
-extern void __dev_addr_discard(struct dev_addr_list **list);
 extern void dev_set_promiscuity(struct net_device *dev, int inc);
 extern void dev_set_allmulti(struct net_device *dev, int inc);
 extern void netdev_state_change(struct net_device *dev);
@@ -1,6 +1,8 @@
 #ifndef _IPT_IPRANGE_H
 #define _IPT_IPRANGE_H

+#include <linux/types.h>
+
 #define IPRANGE_SRC 0x01 /* Match source IP address */
 #define IPRANGE_DST 0x02 /* Match destination IP address */
 #define IPRANGE_SRC_INV 0x10 /* Negate the condition */
@@ -652,8 +652,7 @@ struct tcp_congestion_ops {
 /* lower bound for congestion window (optional) */
 u32 (*min_cwnd)(const struct sock *sk);
 /* do new cwnd calculation (required) */
-void (*cong_avoid)(struct sock *sk, u32 ack,
-u32 rtt, u32 in_flight, int good_ack);
+void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight, int good_ack);
 /* call before changing ca_state (optional) */
 void (*set_state)(struct sock *sk, u8 new_state);
 /* call when cwnd event occurs (optional) */
@@ -684,8 +683,7 @@ extern void tcp_slow_start(struct tcp_sock *tp);

 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
-u32 rtt, u32 in_flight, int flag);
+extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag);
 extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;

@@ -585,7 +585,6 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct
 struct xfrm_dst
 {
 union {
-struct xfrm_dst *next;
 struct dst_entry dst;
 struct rtable rt;
 struct rt6_info rt6;
@@ -460,11 +460,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
 skb_pull(skb, plen);
 skb_set_mac_header(skb, -ETH_HLEN);
 skb->pkt_type = PACKET_HOST;
-#ifdef CONFIG_BR2684_FAST_TRANS
-skb->protocol = ((u16 *) skb->data)[-1];
-#else /* some protocols might require this: */
 skb->protocol = br_type_trans(skb, net_dev);
-#endif /* CONFIG_BR2684_FAST_TRANS */
 #else
 skb_pull(skb, plen - ETH_HLEN);
 skb->protocol = eth_type_trans(skb, net_dev);
@@ -2715,20 +2715,6 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
 return 0;
 }

-void __dev_addr_discard(struct dev_addr_list **list)
-{
-struct dev_addr_list *tmp;
-
-while (*list != NULL) {
-tmp = *list;
-*list = tmp->next;
-if (tmp->da_users > tmp->da_gusers)
-printk("__dev_addr_discard: address leakage! "
-"da_users=%d\n", tmp->da_users);
-kfree(tmp);
-}
-}
-
 /**
 * dev_unicast_delete - Release secondary unicast address.
 * @dev: device
@@ -2777,11 +2763,30 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
 }
 EXPORT_SYMBOL(dev_unicast_add);

-static void dev_unicast_discard(struct net_device *dev)
+static void __dev_addr_discard(struct dev_addr_list **list)
 {
+struct dev_addr_list *tmp;
+
+while (*list != NULL) {
+tmp = *list;
+*list = tmp->next;
+if (tmp->da_users > tmp->da_gusers)
+printk("__dev_addr_discard: address leakage! "
+"da_users=%d\n", tmp->da_users);
+kfree(tmp);
+}
+}
+
+static void dev_addr_discard(struct net_device *dev)
+{
 netif_tx_lock_bh(dev);
+
 __dev_addr_discard(&dev->uc_list);
 dev->uc_count = 0;
+
+__dev_addr_discard(&dev->mc_list);
+dev->mc_count = 0;
+
 netif_tx_unlock_bh(dev);
 }

@@ -3739,8 +3744,7 @@ void unregister_netdevice(struct net_device *dev)
 /*
 * Flush the unicast and multicast chains
 */
-dev_unicast_discard(dev);
-dev_mc_discard(dev);
+dev_addr_discard(dev);

 if (dev->uninit)
 dev->uninit(dev);
@@ -177,18 +177,6 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
 }
 EXPORT_SYMBOL(dev_mc_unsync);

-/*
- * Discard multicast list when a device is downed
- */
-
-void dev_mc_discard(struct net_device *dev)
-{
-netif_tx_lock_bh(dev);
-__dev_addr_discard(&dev->mc_list);
-dev->mc_count = 0;
-netif_tx_unlock_bh(dev);
-}
-
 #ifdef CONFIG_PROC_FS
 static void *dev_mc_seq_start(struct seq_file *seq, loff_t *pos)
 {
|
@ -79,27 +79,27 @@
|
|||
|
||||
struct gen_estimator
|
||||
{
|
||||
struct gen_estimator *next;
|
||||
struct list_head list;
|
||||
struct gnet_stats_basic *bstats;
|
||||
struct gnet_stats_rate_est *rate_est;
|
||||
spinlock_t *stats_lock;
|
||||
unsigned interval;
|
||||
int ewma_log;
|
||||
u64 last_bytes;
|
||||
u32 last_packets;
|
||||
u32 avpps;
|
||||
u32 avbps;
|
||||
struct rcu_head e_rcu;
|
||||
};
|
||||
|
||||
struct gen_estimator_head
|
||||
{
|
||||
struct timer_list timer;
|
||||
struct gen_estimator *list;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
|
||||
|
||||
/* Estimator array lock */
|
||||
/* Protects against NULL dereference */
|
||||
static DEFINE_RWLOCK(est_lock);
|
||||
|
||||
static void est_timer(unsigned long arg)
|
||||
|
@ -107,13 +107,17 @@ static void est_timer(unsigned long arg)
|
|||
int idx = (int)arg;
|
||||
struct gen_estimator *e;
|
||||
|
||||
read_lock(&est_lock);
|
||||
for (e = elist[idx].list; e; e = e->next) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(e, &elist[idx].list, list) {
|
||||
u64 nbytes;
|
||||
u32 npackets;
|
||||
u32 rate;
|
||||
|
||||
spin_lock(e->stats_lock);
|
||||
read_lock(&est_lock);
|
||||
if (e->bstats == NULL)
|
||||
goto skip;
|
||||
|
||||
nbytes = e->bstats->bytes;
|
||||
npackets = e->bstats->packets;
|
||||
rate = (nbytes - e->last_bytes)<<(7 - idx);
|
||||
|
@ -125,12 +129,14 @@ static void est_timer(unsigned long arg)
|
|||
e->last_packets = npackets;
|
||||
e->avpps += ((long)rate - (long)e->avpps) >> e->ewma_log;
|
||||
e->rate_est->pps = (e->avpps+0x1FF)>>10;
|
||||
skip:
|
||||
read_unlock(&est_lock);
|
||||
spin_unlock(e->stats_lock);
|
||||
}
|
||||
|
||||
if (elist[idx].list != NULL)
|
||||
if (!list_empty(&elist[idx].list))
|
||||
mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
|
||||
read_unlock(&est_lock);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -147,12 +153,17 @@ static void est_timer(unsigned long arg)
|
|||
* &rate_est with the statistics lock grabed during this period.
|
||||
*
|
||||
* Returns 0 on success or a negative error code.
|
||||
*
|
||||
* NOTE: Called under rtnl_mutex
|
||||
*/
|
||||
int gen_new_estimator(struct gnet_stats_basic *bstats,
|
||||
struct gnet_stats_rate_est *rate_est, spinlock_t *stats_lock, struct rtattr *opt)
|
||||
struct gnet_stats_rate_est *rate_est,
|
||||
spinlock_t *stats_lock,
|
||||
struct rtattr *opt)
|
||||
{
|
||||
struct gen_estimator *est;
|
||||
struct gnet_estimator *parm = RTA_DATA(opt);
|
||||
int idx;
|
||||
|
||||
if (RTA_PAYLOAD(opt) < sizeof(*parm))
|
||||
return -EINVAL;
|
||||
|
@ -164,7 +175,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
|
|||
if (est == NULL)
|
||||
return -ENOBUFS;
|
||||
|
||||
est->interval = parm->interval + 2;
|
||||
idx = parm->interval + 2;
|
||||
est->bstats = bstats;
|
||||
est->rate_est = rate_est;
|
||||
est->stats_lock = stats_lock;
|
||||
|
@ -174,20 +185,25 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
|
|||
est->last_packets = bstats->packets;
|
||||
est->avpps = rate_est->pps<<10;
|
||||
|
||||
est->next = elist[est->interval].list;
|
||||
if (est->next == NULL) {
|
||||
init_timer(&elist[est->interval].timer);
|
||||
elist[est->interval].timer.data = est->interval;
|
||||
elist[est->interval].timer.expires = jiffies + ((HZ<<est->interval)/4);
|
||||
elist[est->interval].timer.function = est_timer;
|
||||
add_timer(&elist[est->interval].timer);
|
||||
if (!elist[idx].timer.function) {
|
||||
INIT_LIST_HEAD(&elist[idx].list);
|
||||
setup_timer(&elist[idx].timer, est_timer, idx);
|
||||
}
|
||||
write_lock_bh(&est_lock);
|
||||
elist[est->interval].list = est;
|
||||
write_unlock_bh(&est_lock);
|
||||
|
||||
if (list_empty(&elist[idx].list))
|
||||
mod_timer(&elist[idx].timer, jiffies + ((HZ<<idx)/4));
|
||||
|
||||
list_add_rcu(&est->list, &elist[idx].list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __gen_kill_estimator(struct rcu_head *head)
|
||||
{
|
||||
struct gen_estimator *e = container_of(head,
|
||||
struct gen_estimator, e_rcu);
|
||||
kfree(e);
|
||||
}
|
||||
|
||||
/**
|
||||
* gen_kill_estimator - remove a rate estimator
|
||||
* @bstats: basic statistics
|
||||
|
@ -195,31 +211,32 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
|
|||
*
|
||||
* Removes the rate estimator specified by &bstats and &rate_est
|
||||
* and deletes the timer.
|
||||
*
|
||||
* NOTE: Called under rtnl_mutex
|
||||
*/
|
||||
void gen_kill_estimator(struct gnet_stats_basic *bstats,
|
||||
struct gnet_stats_rate_est *rate_est)
|
||||
{
|
||||
int idx;
|
||||
struct gen_estimator *est, **pest;
|
||||
struct gen_estimator *e, *n;
|
||||
|
||||
for (idx=0; idx <= EST_MAX_INTERVAL; idx++) {
|
||||
int killed = 0;
|
||||
pest = &elist[idx].list;
|
||||
while ((est=*pest) != NULL) {
|
||||
if (est->rate_est != rate_est || est->bstats != bstats) {
|
||||
pest = &est->next;
|
||||
|
||||
/* Skip non initialized indexes */
|
||||
if (!elist[idx].timer.function)
|
||||
continue;
|
||||
|
||||
list_for_each_entry_safe(e, n, &elist[idx].list, list) {
|
||||
if (e->rate_est != rate_est || e->bstats != bstats)
|
||||
continue;
|
||||
}
|
||||
|
||||
write_lock_bh(&est_lock);
|
||||
*pest = est->next;
|
||||
e->bstats = NULL;
|
||||
write_unlock_bh(&est_lock);
|
||||
|
||||
kfree(est);
|
||||
killed++;
|
||||
list_del_rcu(&e->list);
|
||||
call_rcu(&e->e_rcu, __gen_kill_estimator);
|
||||
}
|
||||
if (killed && elist[idx].list == NULL)
|
||||
del_timer(&elist[idx].timer);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -137,7 +137,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
|
|||
}
|
||||
|
||||
static void bictcp_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 seq_rtt, u32 in_flight, int data_acked)
|
||||
u32 in_flight, int data_acked)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct bictcp *ca = inet_csk_ca(sk);
|
||||
|
|
|
@ -324,8 +324,7 @@ EXPORT_SYMBOL_GPL(tcp_slow_start);
|
|||
/* This is Jacobson's slow start and congestion avoidance.
|
||||
* SIGCOMM '88, p. 328.
|
||||
*/
|
||||
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
|
||||
int flag)
|
||||
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
|
|
|
@ -270,7 +270,7 @@ static inline void measure_delay(struct sock *sk)
|
|||
}
|
||||
|
||||
static void bictcp_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 seq_rtt, u32 in_flight, int data_acked)
|
||||
u32 in_flight, int data_acked)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct bictcp *ca = inet_csk_ca(sk);
|
||||
|
|
|
@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)
|
|||
tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
|
||||
}
|
||||
|
||||
static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
|
||||
static void hstcp_cong_avoid(struct sock *sk, u32 adk,
|
||||
u32 in_flight, int data_acked)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
|
|
@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
|
|||
return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
|
||||
}
|
||||
|
||||
static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
|
||||
static void htcp_cong_avoid(struct sock *sk, u32 ack, s32 rtt,
|
||||
u32 in_flight, int data_acked)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
|
|
@ -85,7 +85,7 @@ static inline u32 hybla_fraction(u32 odds)
|
|||
* o Give cwnd a new value based on the model proposed
|
||||
* o remember increments <1
|
||||
*/
|
||||
static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
|
||||
static void hybla_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 in_flight, int flag)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
@ -103,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
|
|||
return;
|
||||
|
||||
if (!ca->hybla_en)
|
||||
return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
|
||||
return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
|
||||
|
||||
if (ca->rho == 0)
|
||||
hybla_recalc_param(sk);
|
||||
|
|
|
@ -258,7 +258,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
|
|||
/*
|
||||
* Increase window in response to successful acknowledgment.
|
||||
*/
|
||||
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
|
||||
static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 in_flight, int flag)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
|
|
@ -2323,11 +2323,11 @@ static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
|
|||
tcp_ack_no_tstamp(sk, seq_rtt, flag);
|
||||
}
|
||||
|
||||
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
|
||||
static void tcp_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 in_flight, int good)
|
||||
{
|
||||
const struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
icsk->icsk_ca_ops->cong_avoid(sk, ack, rtt, in_flight, good);
|
||||
icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight, good);
|
||||
tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
|
||||
}
|
||||
|
||||
|
@ -2826,11 +2826,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
|
|||
/* Advance CWND, if state allows this. */
|
||||
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
|
||||
tcp_may_raise_cwnd(sk, flag))
|
||||
tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
|
||||
tcp_cong_avoid(sk, ack, prior_in_flight, 0);
|
||||
tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
|
||||
} else {
|
||||
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
|
||||
tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 1);
|
||||
tcp_cong_avoid(sk, ack, prior_in_flight, 1);
|
||||
}
|
||||
|
||||
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
|
||||
|
|
|
@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
|
|||
* Will only call newReno CA when away from inference.
|
||||
* From TCP-LP's paper, this will be handled in additive increasement.
|
||||
*/
|
||||
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
|
||||
int flag)
|
||||
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
|
||||
{
|
||||
struct lp *lp = inet_csk_ca(sk);
|
||||
|
||||
if (!(lp->flag & LP_WITHIN_INF))
|
||||
tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
|
||||
tcp_reno_cong_avoid(sk, ack, in_flight, flag);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
#define TCP_SCALABLE_AI_CNT 50U
|
||||
#define TCP_SCALABLE_MD_SCALE 3
|
||||
|
||||
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
|
||||
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 in_flight, int flag)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
|
|
@ -163,13 +163,13 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
|
|||
EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
|
||||
|
||||
static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 seq_rtt, u32 in_flight, int flag)
|
||||
u32 in_flight, int flag)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct vegas *vegas = inet_csk_ca(sk);
|
||||
|
||||
if (!vegas->doing_vegas_now)
|
||||
return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
|
||||
return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
|
||||
|
||||
/* The key players are v_beg_snd_una and v_beg_snd_nxt.
|
||||
*
|
||||
|
@ -228,7 +228,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
|
|||
/* We don't have enough RTT samples to do the Vegas
|
||||
* calculation, so we'll behave like Reno.
|
||||
*/
|
||||
tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
|
||||
tcp_reno_cong_avoid(sk, ack, in_flight, flag);
|
||||
} else {
|
||||
u32 rtt, target_cwnd, diff;
|
||||
|
||||
|
|
|
@ -115,13 +115,13 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
|
|||
}
|
||||
|
||||
static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 seq_rtt, u32 in_flight, int flag)
|
||||
u32 in_flight, int flag)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct veno *veno = inet_csk_ca(sk);
|
||||
|
||||
if (!veno->doing_veno_now)
|
||||
return tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
|
||||
return tcp_reno_cong_avoid(sk, ack, in_flight, flag);
|
||||
|
||||
/* limited by applications */
|
||||
if (!tcp_is_cwnd_limited(sk, in_flight))
|
||||
|
@ -132,7 +132,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack,
|
|||
/* We don't have enough rtt samples to do the Veno
|
||||
* calculation, so we'll behave like Reno.
|
||||
*/
|
||||
tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
|
||||
tcp_reno_cong_avoid(sk, ack, in_flight, flag);
|
||||
} else {
|
||||
u32 rtt, target_cwnd;
|
||||
|
||||
|
|
|
@ -70,7 +70,7 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, ktime_t last)
|
|||
}
|
||||
|
||||
static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack,
|
||||
u32 seq_rtt, u32 in_flight, int flag)
|
||||
u32 in_flight, int flag)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct yeah *yeah = inet_csk_ca(sk);
|
||||
|
|
|
@@ -2567,7 +2567,7 @@ int __init irsock_init(void)
 * Remove IrDA protocol
 *
 */
-void __exit irsock_cleanup(void)
+void irsock_cleanup(void)
 {
 sock_unregister(PF_IRDA);
 proto_unregister(&irda_proto);
@@ -95,14 +95,14 @@ int __init irda_device_init( void)
 return 0;
 }

-static void __exit leftover_dongle(void *arg)
+static void leftover_dongle(void *arg)
 {
 struct dongle_reg *reg = arg;
 IRDA_WARNING("IrDA: Dongle type %x not unregistered\n",
 reg->type);
 }

-void __exit irda_device_cleanup(void)
+void irda_device_cleanup(void)
 {
 IRDA_DEBUG(4, "%s()\n", __FUNCTION__);

@@ -153,7 +153,7 @@ int __init iriap_init(void)
 * Initializes the IrIAP layer, called by the module cleanup code in
 * irmod.c
 */
-void __exit iriap_cleanup(void)
+void iriap_cleanup(void)
 {
 irlmp_unregister_service(service_handle);

@@ -95,7 +95,7 @@ int __init irlap_init(void)
 return 0;
 }

-void __exit irlap_cleanup(void)
+void irlap_cleanup(void)
 {
 IRDA_ASSERT(irlap != NULL, return;);

@@ -116,7 +116,7 @@ int __init irlmp_init(void)
 * Remove IrLMP layer
 *
 */
-void __exit irlmp_cleanup(void)
+void irlmp_cleanup(void)
 {
 /* Check for main structure */
 IRDA_ASSERT(irlmp != NULL, return;);
@@ -84,7 +84,7 @@ void __init irda_proc_register(void)
 * Unregister irda entry in /proc file system
 *
 */
-void __exit irda_proc_unregister(void)
+void irda_proc_unregister(void)
 {
 int i;

@@ -287,7 +287,7 @@ int __init irda_sysctl_register(void)
 * Unregister our sysctl interface
 *
 */
-void __exit irda_sysctl_unregister(void)
+void irda_sysctl_unregister(void)
 {
 unregister_sysctl_table(irda_table_header);
 }
@@ -109,7 +109,7 @@ int __init irttp_init(void)
 * Called by module destruction/cleanup code
 *
 */
-void __exit irttp_cleanup(void)
+void irttp_cleanup(void)
 {
 /* Check for main structure */
 IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;);
@@ -436,6 +436,7 @@ config NETFILTER_XT_MATCH_CONNBYTES
 config NETFILTER_XT_MATCH_CONNLIMIT
 tristate '"connlimit" match support"'
 depends on NETFILTER_XTABLES
+depends on NF_CONNTRACK
 ---help---
 This match allows you to match against the number of parallel
 connections to a server per client IP address (or address block).
@@ -1012,13 +1012,14 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 {
 struct sock *sk = sock->sk;
 struct netlink_sock *nlk = nlk_sk(sk);
-int val = 0, err;
+unsigned int val = 0;
+int err;

 if (level != SOL_NETLINK)
 return -ENOPROTOOPT;

 if (optlen >= sizeof(int) &&
-get_user(val, (int __user *)optval))
+get_user(val, (unsigned int __user *)optval))
 return -EFAULT;

 switch (optname) {
@@ -97,7 +97,7 @@ config NET_SCH_ATM
 select classes of this queuing discipline. Each class maps
 the flow(s) it is handling to a given virtual circuit.

-See the top of <file:net/sched/sch_atm.c>) for more details.
+See the top of <file:net/sched/sch_atm.c> for more details.

 To compile this code as a module, choose M here: the
 module will be called sch_atm.
@@ -137,7 +137,7 @@ config NET_SCH_SFQ
 tristate "Stochastic Fairness Queueing (SFQ)"
 ---help---
 Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
-packet scheduling algorithm .
+packet scheduling algorithm.

 See the top of <file:net/sched/sch_sfq.c> for more details.

@@ -306,7 +306,7 @@ config NET_CLS_RSVP6
 is important for real time data such as streaming sound or video.

 Say Y here if you want to be able to classify outgoing packets based
-on their RSVP requests and you are using the IPv6.
+on their RSVP requests and you are using the IPv6 protocol.

 To compile this code as a module, choose M here: the
 module will be called cls_rsvp6.
|
@ -292,13 +292,12 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
|
|||
}
|
||||
}
|
||||
DPRINTK("atm_tc_change: new id %x\n", classid);
|
||||
flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
|
||||
flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
|
||||
DPRINTK("atm_tc_change: flow %p\n", flow);
|
||||
if (!flow) {
|
||||
error = -ENOBUFS;
|
||||
goto err_out;
|
||||
}
|
||||
memset(flow, 0, sizeof(*flow));
|
||||
flow->filter_list = NULL;
|
||||
if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
|
||||
flow->q = &noop_qdisc;
|
||||
|
|
|
@@ -2141,7 +2141,7 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
 if (last == first)
 break;

-last = last->u.next;
+last = (struct xfrm_dst *)last->u.dst.next;
 last->child_mtu_cached = mtu;
 }
