Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:

1) Make syn floods consume significantly less resources by

   a) Not pre-COW'ing routing metrics for SYN/ACKs
   b) Mirroring the device queue mapping of the SYN for the SYN/ACK reply.

   Both from Eric Dumazet.

2) Fix calculation errors in Byte Queue Limiting, from Hiroaki SHIMODA.

3) Validate the length requested when building a paged SKB for a
   socket, so we don't overrun the page vector accidentally.  From
   Jason Wang.

4) When netlabel is disabled, we abort all IP option processing when we
   see a CIPSO option.  This isn't the right thing to do, we should
   simply skip over it and continue processing the remaining options
   (if any).  Fix from Paul Moore.

5) SRIOV fixes for the mellanox driver from Jack Morgenstein and Marcel
   Apfelbaum.

6) 8139cp enables the receiver before the ring address is properly
   programmed, which potentially lets the device crap over random
   memory.  Fix from Jason Wang.

7) e1000/e1000e fixes for i217 RST handling, and an improper buffer
   address reference in jumbo RX frame processing from Bruce Allan and
   Sebastian Andrzej Siewior, respectively.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  fec_mpc52xx: fix timestamp filtering
  mcs7830: Implement link state detection
  e1000e: fix Rapid Start Technology support for i217
  e1000: look into the page instead of skb->data for e1000_tbi_adjust_stats()
  r8169: call netif_napi_del at errpaths and at driver unload
  tcp: reflect SYN queue_mapping into SYNACK packets
  tcp: do not create inetpeer on SYNACK message
  8139cp/8139too: terminate the eeprom access with the right opmode
  8139cp: set ring address before enabling receiver
  cipso: handle CIPSO options correctly when NetLabel is disabled
  net: sock: validate data_len before allocating skb in sock_alloc_send_pskb()
  bql: Avoid possible inconsistent calculation.
  bql: Avoid unneeded limit decrement.
  bql: Fix POSDIFF() to integer overflow aware.
  net/mlx4_core: Fix obscure mlx4_cmd_box parameter in QUERY_DEV_CAP
  net/mlx4_core: Check port out-of-range before using in mlx4_slave_cap
  net/mlx4_core: Fixes for VF / Guest startup flow
  net/mlx4_en: Fix improper use of "port" parameter in mlx4_en_event
  net/mlx4_core: Fix number of EQs used in ICM initialisation
  net/mlx4_core: Fix the slave_id out-of-range test in mlx4_eq_int
commit 4fc3acf291
21 changed files with 201 additions and 84 deletions
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 		length = status & BCOM_FEC_RX_BD_LEN_MASK;
 		skb_put(rskb, length - 4);	/* length without CRC32 */
 		rskb->protocol = eth_type_trans(rskb, dev);
-		if (!skb_defer_rx_timestamp(skb))
+		if (!skb_defer_rx_timestamp(rskb))
 			netif_rx(rskb);

 		spin_lock(&priv->lock);
@@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 				spin_lock_irqsave(&adapter->stats_lock,
 						  irq_flags);
 				e1000_tbi_adjust_stats(hw, &adapter->stats,
-						       length, skb->data);
+						       length, mapped);
 				spin_unlock_irqrestore(&adapter->stats_lock,
 						       irq_flags);
 				length--;
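
A note on the e1000 hunk above: in the jumbo-frame path the hardware DMAs the
frame into a mapped page, while skb->data holds only the copied header, so a
helper that inspects the frame's last byte (as e1000_tbi_adjust_stats() does)
must be handed the page address. A minimal userspace sketch of the buffer
layout; the struct and names here are illustrative, not the driver's:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical layout: the NIC writes the frame into 'page',
     * while 'skb_data' holds only a short header copy.  Reading the
     * frame's last byte via skb_data would be out of bounds. */
    struct rx_buf {
        unsigned char skb_data[64];   /* linear area: header copy only */
        unsigned char page[4096];     /* DMA target: full frame */
    };

    static unsigned char frame_last_byte(const unsigned char *frame, size_t len)
    {
        return frame[len - 1];        /* the byte the stats helper inspects */
    }

    int main(void)
    {
        struct rx_buf rb;
        size_t frame_len = 1500;

        memset(rb.page, 0xab, sizeof(rb.page));
        memset(rb.skb_data, 0, sizeof(rb.skb_data));

        /* Correct: read from the mapped page the hardware filled. */
        printf("last byte via page: 0x%02x\n",
               frame_last_byte(rb.page, frame_len));
        return 0;
    }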
@@ -165,14 +165,14 @@
 #define I217_EEE_100_SUPPORTED	(1 << 1)	/* 100BaseTx EEE supported */

 /* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL                 BM_PHY_REG(BM_WUC_PAGE, 70)
 #define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
 #define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK                0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET    0x1000
 #define I217_CGFREG                     PHY_REG(772, 29)
-#define I217_CGFREG_MASK                0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET    0x0002
 #define I217_MEMPWR                     PHY_REG(772, 26)
-#define I217_MEMPWR_MASK                0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010

 /* Strapping Option Register - RO */
 #define E1000_STRAP	0x0000C
@@ -4089,12 +4089,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 			 * power good.
 			 */
 			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
-			phy_reg |= I217_SxCTRL_MASK;
+			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
 			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);

 			/* Disable the SMB release on LCD reset. */
 			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
-			phy_reg &= ~I217_MEMPWR;
+			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
 			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
 		}

@@ -4103,7 +4103,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 		 * Support
 		 */
 		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
-		phy_reg |= I217_CGFREG_MASK;
+		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);

 release:
@@ -4176,7 +4176,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
 		ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
 		if (ret_val)
 			goto release;
-		phy_reg |= I217_MEMPWR_MASK;
+		phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
 		e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);

 		/* Disable Proxy */
@@ -4186,7 +4186,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
 		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
 		if (ret_val)
 			goto release;
-		phy_reg &= ~I217_CGFREG_MASK;
+		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
 		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
 release:
 	if (ret_val)
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = NULL
+		.wrapper = mlx4_QUERY_FW_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.out_is_imm = false,
 		.encode_slave_id = false,
 		.verify = NULL,
-		.wrapper = NULL
+		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
 	},
 	{
 		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 	struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
 	struct mlx4_en_priv *priv;

-	if (!mdev->pndev[port])
-		return;
-
-	priv = netdev_priv(mdev->pndev[port]);
 	switch (event) {
 	case MLX4_DEV_EVENT_PORT_UP:
 	case MLX4_DEV_EVENT_PORT_DOWN:
+		if (!mdev->pndev[port])
+			return;
+		priv = netdev_priv(mdev->pndev[port]);
 		/* To prevent races, we poll the link state in a separate
 		   task rather than changing it here */
 		priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 		break;

 	default:
-		mlx4_warn(mdev, "Unhandled event: %d\n", event);
+		if (port < 1 || port > dev->caps.num_ports ||
+		    !mdev->pndev[port])
+			return;
+		mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
 	}
 }

@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)

 			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

-			if (flr_slave > dev->num_slaves) {
+			if (flr_slave >= dev->num_slaves) {
 				mlx4_warn(dev,
 					  "Got FLR for unknown function: %d\n",
 					  flr_slave);
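
The one-character change above fixes a classic fencepost: slave ids index a
zero-based table of dev->num_slaves entries, so the largest valid id is
num_slaves - 1 and the test must reject id >= num_slaves. A standalone sketch
of the corrected bound (table size and names are illustrative):

    #include <stdio.h>

    #define NUM_SLAVES 4

    static int slave_table[NUM_SLAVES];

    /* Valid ids are 0 .. NUM_SLAVES-1, so reject id >= NUM_SLAVES.
     * The old 'id > NUM_SLAVES' test let id == NUM_SLAVES through,
     * one element past the end of the table. */
    static int handle_flr(unsigned int id)
    {
        if (id >= NUM_SLAVES) {
            printf("FLR for unknown function %u\n", id);
            return -1;
        }
        slave_table[id] = 1;
        return 0;
    }

    int main(void)
    {
        handle_flr(3);  /* last valid slave */
        handle_flr(4);  /* rejected; the old test would index out of bounds */
        return 0;
    }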
@@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	outbox = mailbox->buf;

 	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
-			   MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 	if (err)
 		goto out;

@@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)

 	for (i = 1; i <= dev_cap->num_ports; ++i) {
 		err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
-				   MLX4_CMD_TIME_CLASS_B,
-				   !mlx4_is_slave(dev));
+				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 		if (err)
 			goto out;

@@ -669,6 +668,28 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	return err;
 }

+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd)
+{
+	int err = 0;
+	u8 field;
+
+	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	if (err)
+		return err;
+
+	/* For guests, report Blueflame disabled */
+	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
+	field &= 0x7f;
+	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+
+	return 0;
+}
+
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_vhcr *vhcr,
 			    struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 		((fw_ver & 0xffff0000ull) >> 16) |
 		((fw_ver & 0x0000ffffull) << 16);

+	if (mlx4_is_slave(dev))
+		goto out;
+
 	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
 	dev->caps.function = lg;

@@ -927,6 +951,27 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
 	return err;
 }

+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd)
+{
+	u8 *outbuf;
+	int err;
+
+	outbuf = outbox->buf;
+	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+	if (err)
+		return err;
+
+	/* for slaves, zero out everything except FW version */
+	outbuf[0] = outbuf[1] = 0;
+	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+	return 0;
+}
+
 static void get_board_id(void *vsd, char *board_id)
 {
 	int i;
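
The QUERY_FW wrapper above runs the native command and then blanks everything
in the reply except the bytes that, per the hunk's own comment, carry the FW
version, so a guest learns the firmware version and nothing else. The masking
step in isolation (the mailbox size here is an illustrative stand-in for
QUERY_FW_OUT_SIZE, and the "version" offsets simply follow the hunk):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define QUERY_FW_OUT_SIZE 0x100  /* illustrative mailbox size */

    static void sanitize_fw_reply(uint8_t *outbuf)
    {
        /* Zero bytes 0-1 and everything from offset 8 on,
         * keeping only the version bytes in between. */
        outbuf[0] = outbuf[1] = 0;
        memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
    }

    int main(void)
    {
        uint8_t mbox[QUERY_FW_OUT_SIZE];

        memset(mbox, 0xee, sizeof(mbox));  /* pretend firmware data */
        sanitize_fw_reply(mbox);
        printf("byte 4 kept: 0x%02x, byte 8 cleared: 0x%02x\n",
               mbox[4], mbox[8]);
        return 0;
    }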
|
@ -142,12 +142,6 @@ struct mlx4_port_config {
|
|||
struct pci_dev *pdev;
|
||||
};
|
||||
|
||||
static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
|
||||
{
|
||||
return dev->caps.reserved_eqs +
|
||||
MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
|
||||
}
|
||||
|
||||
int mlx4_check_port_params(struct mlx4_dev *dev,
|
||||
enum mlx4_port_type *port_type)
|
||||
{
|
||||
|
@@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	}

 	dev->caps.num_ports	     = dev_cap->num_ports;
+	dev->phys_caps.num_phys_eqs  = MLX4_MAX_EQ_NUM;
 	for (i = 1; i <= dev->caps.num_ports; ++i) {
 		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
 		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

 	memset(&dev_cap, 0, sizeof(dev_cap));
+	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
 	err = mlx4_dev_cap(dev, &dev_cap);
 	if (err) {
 		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
 		return err;
 	}

+	err = mlx4_QUERY_FW(dev);
+	if (err)
+		mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+
 	page_size = ~dev->caps.page_size_cap + 1;
 	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
 	if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 	dev->caps.num_mgms              = 0;
 	dev->caps.num_amgms             = 0;

-	for (i = 1; i <= dev->caps.num_ports; ++i)
-		dev->caps.port_mask[i] = dev->caps.port_type[i];
-
 	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
 		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
 			 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
 		return -ENODEV;
 	}

+	for (i = 1; i <= dev->caps.num_ports; ++i)
+		dev->caps.port_mask[i] = dev->caps.port_type[i];
+
 	if (dev->caps.uar_page_size * (dev->caps.num_uars -
 				       dev->caps.reserved_uars) >
 	    pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		return -ENODEV;
 	}

-#if 0
-	mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
-	mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
-		  dev->caps.num_uars, dev->caps.reserved_uars,
-		  dev->caps.uar_page_size * dev->caps.num_uars,
-		  pci_resource_len(dev->pdev, 2));
-	mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
-		  dev->caps.reserved_eqs);
-	mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
-		  dev->caps.num_pds, dev->caps.reserved_pds,
-		  dev->caps.slave_pd_shift, dev->caps.pd_base);
-#endif
 	return 0;
 }

@@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
 	if (err)
 		goto err_srq;

-	num_eqs = (mlx4_is_master(dev)) ?
-		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-		dev->caps.num_eqs;
+	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+		  dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
 				  cmpt_base +
 				  ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 	}


-	num_eqs = (mlx4_is_master(dev)) ?
-		roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
-		dev->caps.num_eqs;
+	num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+		  dev->caps.num_eqs;
 	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
 				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
 				  num_eqs, num_eqs, 0, 0);
@@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev);
 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
 				enum mlx4_res_tracker_free_type type);

+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+			  struct mlx4_vhcr *vhcr,
+			  struct mlx4_cmd_mailbox *inbox,
+			  struct mlx4_cmd_mailbox *outbox,
+			  struct mlx4_cmd_info *cmd);
 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			  struct mlx4_vhcr *vhcr,
 			  struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_cmd_mailbox *inbox,
 			    struct mlx4_cmd_mailbox *outbox,
 			    struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+			       struct mlx4_vhcr *vhcr,
+			       struct mlx4_cmd_mailbox *inbox,
+			       struct mlx4_cmd_mailbox *outbox,
+			       struct mlx4_cmd_info *cmd);
 int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_vhcr *vhcr,
 			    struct mlx4_cmd_mailbox *inbox,
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	profile[MLX4_RES_AUXC].num    = request->num_qp;
 	profile[MLX4_RES_SRQ].num     = request->num_srq;
 	profile[MLX4_RES_CQ].num      = request->num_cq;
-	profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+	profile[MLX4_RES_EQ].num      = mlx4_is_mfunc(dev) ?
+					dev->phys_caps.num_phys_eqs :
+					min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
 	profile[MLX4_RES_DMPT].num    = request->num_mpt;
 	profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
 	profile[MLX4_RES_MTT].num     = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 			init_hca->log_num_cqs = profile[i].log_num;
 			break;
 		case MLX4_RES_EQ:
-			dev->caps.num_eqs = profile[i].num;
+			dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
+								     MAX_MSIX));
 			init_hca->eqc_base = profile[i].start;
-			init_hca->log_num_eqs = profile[i].log_num;
+			init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
 			break;
 		case MLX4_RES_DMPT:
 			dev->caps.num_mpts = profile[i].num;
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
 	cp_start_hw(cp);
 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)

 	cpw8(Config5, cpr8(Config5) & PMEStatus);

-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
 	cpw16(MultiIntr, 0);

 	cpw8_f(Cfg9346, Cfg9346_Lock);
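
The two 8139cp hunks above simply move the ring-address programming from after
cp_start_hw() to before it: enabling the receiver while RxRingAddr still holds
a stale or zero value lets the chip DMA into random memory. The ordering rule
in a stubbed sketch (register writes replaced with prints, and the command
value is an illustrative placeholder, not the chip's real encoding):

    #include <stdio.h>
    #include <stdint.h>

    /* Stub register write; a real driver would use writel()/cpw32_f(). */
    static void wr32(const char *reg, uint32_t val)
    {
        printf("write %-12s = 0x%08x\n", reg, val);
    }

    static void program_rings(uint64_t ring_dma)
    {
        /* Point the chip at valid descriptor memory first. */
        wr32("RxRingAddr",   (uint32_t)(ring_dma & 0xffffffff));
        wr32("RxRingAddr+4", (uint32_t)(ring_dma >> 32));
    }

    static void start_hw(void)
    {
        /* Only now is it safe to enable the RX/TX DMA engines. */
        wr32("Cmd", 0x0c /* RxOn | TxOn, illustrative value */);
    }

    int main(void)
    {
        uint64_t ring_dma = 0x12345000;

        program_rings(ring_dma);  /* before... */
        start_hw();               /* ...enabling the engines */
        return 0;
    }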
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)

 static void eeprom_cmd_end(void __iomem *ee_addr)
 {
-	writeb (~EE_CS, ee_addr);
+	writeb(0, ee_addr);
 	eeprom_delay ();
 }

@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
 	}

 	/* Terminate the EEPROM access. */
-	RTL_W8 (Cfg9346, ~EE_CS);
+	RTL_W8(Cfg9346, 0);
 	eeprom_delay ();

 	return retval;
@@ -6345,6 +6345,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)

 	cancel_work_sync(&tp->wk.work);

+	netif_napi_del(&tp->napi);
+
 	unregister_netdev(dev);

 	rtl_release_firmware(tp);
@@ -6668,6 +6670,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return rc;

 err_out_msi_4:
+	netif_napi_del(&tp->napi);
 	rtl_disable_msi(pdev, tp);
 	iounmap(ioaddr);
 err_out_free_res_3:
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	return skb->len > 0;
 }

+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+	u8 *buf = urb->transfer_buffer;
+	bool link;
+
+	if (urb->actual_length < 16)
+		return;
+
+	link = !(buf[1] & 0x20);
+	if (netif_carrier_ok(dev->net) != link) {
+		if (link) {
+			netif_carrier_on(dev->net);
+			usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+		} else
+			netif_carrier_off(dev->net);
+		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+	}
+}
+
 static const struct driver_info moschip_info = {
 	.description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
 	.bind = mcs7830_bind,
 	.rx_fixup = mcs7830_rx_fixup,
-	.flags = FLAG_ETHER,
+	.flags = FLAG_ETHER | FLAG_LINK_INTR,
+	.status = mcs7830_status,
 	.in = 1,
 	.out = 2,
 };
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
 	.description = "Sitecom LN-30 usb-NET adapter",
 	.bind = mcs7830_bind,
 	.rx_fixup = mcs7830_rx_fixup,
-	.flags = FLAG_ETHER,
+	.flags = FLAG_ETHER | FLAG_LINK_INTR,
+	.status = mcs7830_status,
 	.in = 1,
 	.out = 2,
 };
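
For reference, the new mcs7830_status() above reads the adapter's interrupt
endpoint and treats a cleared bit 5 of the second status byte as link-up. The
same decoding in a userspace sketch (the bit position and 16-byte minimum come
from the hunk; the rest is illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decode link state from an interrupt status buffer: bit 5 of
     * byte 1 means "link down", so link is up when it is clear. */
    static bool mcs7830_link_up(const uint8_t *buf, size_t len)
    {
        if (len < 16)       /* short transfer: no valid status */
            return false;
        return !(buf[1] & 0x20);
    }

    int main(void)
    {
        uint8_t up[16]   = { 0 };        /* bit 5 clear -> link up */
        uint8_t down[16] = { 0, 0x20 };  /* bit 5 set   -> link down */

        printf("up:   %d\n", mcs7830_link_up(up, sizeof(up)));
        printf("down: %d\n", mcs7830_link_up(down, sizeof(down)));
        return 0;
    }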
@@ -64,6 +64,7 @@ enum {
 	MLX4_MAX_NUM_PF		= 16,
 	MLX4_MAX_NUM_VF		= 64,
 	MLX4_MFUNC_MAX		= 80,
+	MLX4_MAX_EQ_NUM		= 1024,
 	MLX4_MFUNC_EQ_NUM	= 4,
 	MLX4_MFUNC_MAX_EQES	= 8,
 	MLX4_MFUNC_EQE_MASK	= (MLX4_MFUNC_MAX_EQES - 1)
@@ -239,6 +240,10 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
 	return (major << 32) | (minor << 16) | subminor;
 }

+struct mlx4_phys_caps {
+	u32			num_phys_eqs;
+};
+
 struct mlx4_caps {
 	u64			fw_ver;
 	u32			function;
@@ -499,6 +504,7 @@ struct mlx4_dev {
 	unsigned long		flags;
 	unsigned long		num_slaves;
 	struct mlx4_caps	caps;
+	struct mlx4_phys_caps	phys_caps;
 	struct radix_tree_root	qp_table_tree;
 	u8			rev_id;
 	char			board_id[MLX4_BOARD_ID_LEN];
@@ -42,6 +42,7 @@
 #include <net/netlabel.h>
 #include <net/request_sock.h>
 #include <linux/atomic.h>
+#include <asm/unaligned.h>

 /* known doi values */
 #define CIPSO_V4_DOI_UNKNOWN          0x00000000
@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
 static inline int cipso_v4_validate(const struct sk_buff *skb,
 				    unsigned char **option)
 {
-	return -ENOSYS;
+	unsigned char *opt = *option;
+	unsigned char err_offset = 0;
+	u8 opt_len = opt[1];
+	u8 opt_iter;
+
+	if (opt_len < 8) {
+		err_offset = 1;
+		goto out;
+	}
+
+	if (get_unaligned_be32(&opt[2]) == 0) {
+		err_offset = 2;
+		goto out;
+	}
+
+	for (opt_iter = 6; opt_iter < opt_len;) {
+		if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+			err_offset = opt_iter + 1;
+			goto out;
+		}
+		opt_iter += opt[opt_iter + 1];
+	}
+
+out:
+	*option = opt + err_offset;
+	return err_offset;
+
 }
 #endif /* CONFIG_NETLABEL */

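
The new stub above lets IP option processing continue past a CIPSO option when
NetLabel is compiled out: byte 1 of the option is its total length, a 32-bit
DOI follows at offset 2, and tags start at offset 6 with each tag's length at
tag offset 1. A standalone sketch of that same walk over a raw option buffer
(the test data is made up, and this sketch additionally rejects undersized
tags so the loop always advances, a guard the stub itself leaves to
well-formed input):

    #include <stdio.h>
    #include <stdint.h>

    /* Returns 0 if the CIPSO option parses cleanly, else the offset
     * of the bad byte, mimicking the stub's err_offset convention. */
    static unsigned int cipso_walk(const uint8_t *opt)
    {
        uint8_t opt_len = opt[1];
        uint8_t iter;

        if (opt_len < 8)                /* too short for type+len+DOI+tag */
            return 1;
        if ((opt[2] | opt[3] | opt[4] | opt[5]) == 0)
            return 2;                   /* a DOI of zero is invalid */
        for (iter = 6; iter < opt_len; ) {
            /* CIPSO tags are at least 4 bytes; also reject overruns. */
            if (opt[iter + 1] < 4 || opt[iter + 1] > (uint8_t)(opt_len - iter))
                return iter + 1;
            iter += opt[iter + 1];
        }
        return 0;
    }

    int main(void)
    {
        /* type=134, len=10, DOI=1, one tag of type 1, length 4 */
        uint8_t ok[10]  = { 134, 10, 0, 0, 0, 1, 1, 4, 0, 0 };
        uint8_t bad[10] = { 134, 10, 0, 0, 0, 1, 1, 9, 0, 0 };

        printf("ok:  err_offset=%u\n", cipso_walk(ok));   /* 0 */
        printf("bad: err_offset=%u\n", cipso_walk(bad));  /* 7 */
        return 0;
    }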
@@ -10,23 +10,27 @@
 #include <linux/jiffies.h>
 #include <linux/dynamic_queue_limits.h>

-#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
+#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)

 /* Records completed count and recalculates the queue limit */
 void dql_completed(struct dql *dql, unsigned int count)
 {
 	unsigned int inprogress, prev_inprogress, limit;
-	unsigned int ovlimit, all_prev_completed, completed;
+	unsigned int ovlimit, completed, num_queued;
+	bool all_prev_completed;
+
+	num_queued = ACCESS_ONCE(dql->num_queued);

 	/* Can't complete more than what's in queue */
-	BUG_ON(count > dql->num_queued - dql->num_completed);
+	BUG_ON(count > num_queued - dql->num_completed);

 	completed = dql->num_completed + count;
 	limit = dql->limit;
-	ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
-	inprogress = dql->num_queued - completed;
+	ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
+	inprogress = num_queued - completed;
 	prev_inprogress = dql->prev_num_queued - dql->num_completed;
-	all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+	all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);

 	if ((ovlimit && !inprogress) ||
 	    (dql->prev_ovlimit && all_prev_completed)) {
@@ -104,7 +108,7 @@ void dql_completed(struct dql *dql, unsigned int count)
 	dql->prev_ovlimit = ovlimit;
 	dql->prev_last_obj_cnt = dql->last_obj_cnt;
 	dql->num_completed = completed;
-	dql->prev_num_queued = dql->num_queued;
+	dql->prev_num_queued = num_queued;
 }
 EXPORT_SYMBOL(dql_completed);

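
The POSDIFF() rewrite matters because the dql counters are free-running
unsigned values that wrap: once num_queued wraps past a comparand, the plain
'(A) > (B)' form misjudges the order, while casting the subtraction to int
keeps the comparison correct across the wrap. A small userspace demo of the
two macros side by side (same definitions as the hunk above):

    #include <stdio.h>

    #define POSDIFF_OLD(A, B) ((A) > (B) ? (A) - (B) : 0)
    #define POSDIFF_NEW(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)

    int main(void)
    {
        /* A counter that has just wrapped: B was sampled before the
         * wrap, A after it, so the true forward difference is 16. */
        unsigned int a = 6, b = 0xfffffff6u;

        printf("old: %u\n", POSDIFF_OLD(a, b));  /* 0: wrap makes A look smaller */
        printf("new: %u\n", POSDIFF_NEW(a, b));  /* 16: wrap-aware */
        return 0;
    }

The same cast, with '>=' instead of '>', is what the new AFTER_EQ() macro uses
to decide whether everything queued before the last pass has now completed.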
@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 	gfp_t gfp_mask;
 	long timeo;
 	int err;
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+	err = -EMSGSIZE;
+	if (npages > MAX_SKB_FRAGS)
+		goto failure;

 	gfp_mask = sk->sk_allocation;
 	if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 			skb = alloc_skb(header_len, gfp_mask);
 			if (skb) {
-				int npages;
 				int i;

 				/* No pages, we're done... */
 				if (!data_len)
 					break;

-				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 				skb->truesize += data_len;
 				skb_shinfo(skb)->nr_frags = npages;
 				for (i = 0; i < npages; i++) {
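
The two sock.c hunks above hoist the page-count computation ahead of the
allocation, so an oversized data_len is rejected with EMSGSIZE before
skb_shinfo(skb)->nr_frags can ever be set past MAX_SKB_FRAGS. The arithmetic
in standalone form (the PAGE_SIZE and MAX_SKB_FRAGS values below are typical
for 4 KiB pages, not universal):

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define MAX_SKB_FRAGS 17    /* common value with 4 KiB pages */
    #define EMSGSIZE      90    /* the Linux errno value */

    static int check_data_len(unsigned long data_len)
    {
        /* Round up to whole pages, exactly as the hunk does. */
        unsigned long npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

        if (npages > MAX_SKB_FRAGS)
            return -EMSGSIZE;   /* reject before touching the skb */
        return 0;
    }

    int main(void)
    {
        printf("64 KiB:  %d\n", check_data_len(64 * 1024));   /* 16 pages: ok */
        printf("128 KiB: %d\n", check_data_len(128 * 1024));  /* 32 pages: rejected */
        return 0;
    }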
@@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,

 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
-			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   sk->sk_protocol,
+			   inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
 			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 			      struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
 	if (skb) {
 		__tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);

+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
 			     struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
 }

 /*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	tcp_rsk(req)->snt_synack = tcp_time_stamp;

 	if (tcp_v4_send_synack(sk, dst, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;

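
Taken together, the tcp_ipv4.c hunks above thread one extra u16 from the SYN
to its SYN/ACK: the listener reads skb_get_queue_mapping() from the incoming
SYN and stamps it onto the reply with skb_set_queue_mapping(), so the SYN/ACK
leaves on the transmit queue matching the one the SYN arrived on. A minimal
sketch of the plumbing (the skb type here is a stub, not the kernel's; the
retransmit path passes 0 just as the hunks do, since no SYN is at hand):

    #include <stdio.h>
    #include <stdint.h>

    /* Stub stand-in for struct sk_buff's queue_mapping field. */
    struct skb {
        uint16_t queue_mapping;
    };

    static void send_synack(struct skb *reply, uint16_t queue_mapping)
    {
        reply->queue_mapping = queue_mapping;  /* skb_set_queue_mapping() */
        printf("SYN/ACK on tx queue %u\n", reply->queue_mapping);
    }

    int main(void)
    {
        struct skb syn = { .queue_mapping = 3 };  /* queue the SYN arrived on */
        struct skb synack = { 0 };

        /* Connection-request path: mirror the SYN's queue. */
        send_synack(&synack, syn.queue_mapping);

        /* Retransmit path has no SYN at hand, so it passes 0. */
        send_synack(&synack, 0);
        return 0;
    }

The tcp_ipv6.c hunks below apply the identical change to the IPv6 path.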
@@ -476,7 +476,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,


 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
-			      struct request_values *rvp)
+			      struct request_values *rvp,
+			      u16 queue_mapping)
 {
 	struct inet6_request_sock *treq = inet6_rsk(req);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

 		fl6.daddr = treq->rmt_addr;
+		skb_set_queue_mapping(skb, queue_mapping);
 		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
 			     struct request_values *rvp)
 {
 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v6_send_synack(sk, req, rvp);
+	return tcp_v6_send_synack(sk, req, rvp, 0);
 }

 static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1213,7 +1215,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	security_inet_conn_request(sk, skb, req);

 	if (tcp_v6_send_synack(sk, req,
-			       (struct request_values *)&tmp_ext) ||
+			       (struct request_values *)&tmp_ext,
+			       skb_get_queue_mapping(skb)) ||
 	    want_cookie)
 		goto drop_and_free;
