From 30f7c73bed749814f4985b1ce4566fe64b9c25f0 Mon Sep 17 00:00:00 2001
From: Jack Morgenstein
Date: Wed, 30 May 2012 09:14:50 +0000
Subject: [PATCH 01/20] net/mlx4_core: Fix the slave_id out-of-range test in mlx4_eq_int

This fixes the comparison in the FLR (Function Level Reset) event case.

Signed-off-by: Jack Morgenstein
Reviewed-by: Or Gerlitz
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx4/eq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3b6f8efbf141..bce98d9c0039 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)

 			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

-			if (flr_slave > dev->num_slaves) {
+			if (flr_slave >= dev->num_slaves) {
 				mlx4_warn(dev, "Got FLR for unknown function: %d\n",
 					  flr_slave);

From 3fc929e2d693185aac2686e5e64e24eae10642a4 Mon Sep 17 00:00:00 2001
From: Marcel Apfelbaum
Date: Wed, 30 May 2012 09:14:51 +0000
Subject: [PATCH 02/20] net/mlx4_core: Fix number of EQs used in ICM initialisation

In SRIOV mode, the number of EQs used when computing the total ICM size was incorrect. To fix this, we do the following:

1. We add a new structure to mlx4_dev, mlx4_phys_caps, to contain physical HCA capabilities. The PPF uses the phys capabilities when it computes things like ICM size. The dev_caps structure will then contain the paravirtualized values, making bookkeeping much easier in SRIOV mode. We add a structure rather than a single parameter because there will be other fields in the phys_caps. The first field we add to the mlx4_phys_caps structure is num_phys_eqs.

2. In INIT_HCA, when running in SRIOV mode, the "log_num_eqs" parameter passed to the FW is the number of EQs per VF/PF; each function (PF or VF) has this number of EQs available. However, the total number of EQs which must be allowed for in the ICM is (1 << log_num_eqs) * (#VFs + #PFs). Rather than compute this quantity, we allocate ICM space for 1024 EQs (which is the device maximum number of EQs, and which is the value we place in the mlx4_phys_caps structure). For INIT_HCA, however, we use the per-function number of EQs as described above.

Signed-off-by: Marcel Apfelbaum
Signed-off-by: Jack Morgenstein
Reviewed-by: Or Gerlitz
Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 17 +++++------------ drivers/net/ethernet/mellanox/mlx4/profile.c | 9 ++++++--- include/linux/mlx4/device.h | 6 ++++++ 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2e024a68fa81..2fe9fe5f7cfb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -142,12 +142,6 @@ struct mlx4_port_config { struct pci_dev *pdev; }; -static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev) -{ - return dev->caps.reserved_eqs + - MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1); -} - int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) } dev->caps.num_ports = dev_cap->num_ports; + dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.vl_cap[i] = dev_cap->max_vl[i]; dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; @@ -810,9 +805,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, if (err) goto err_srq; - num_eqs = (mlx4_is_master(dev)) ? - roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : - dev->caps.num_eqs; + num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : + dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, cmpt_base + ((u64) (MLX4_CMPT_TYPE_EQ * @@ -874,9 +868,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, } - num_eqs = (mlx4_is_master(dev)) ? - roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : - dev->caps.num_eqs; + num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : + dev->caps.num_eqs; err = mlx4_init_icm_table(dev, &priv->eq_table.table, init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 06e5adeb76f7..b83bc928d52a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, profile[MLX4_RES_AUXC].num = request->num_qp; profile[MLX4_RES_SRQ].num = request->num_srq; profile[MLX4_RES_CQ].num = request->num_cq; - profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); + profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? 
+ dev->phys_caps.num_phys_eqs : + min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); profile[MLX4_RES_DMPT].num = request->num_mpt; profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); @@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, init_hca->log_num_cqs = profile[i].log_num; break; case MLX4_RES_EQ: - dev->caps.num_eqs = profile[i].num; + dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs, + MAX_MSIX)); init_hca->eqc_base = profile[i].start; - init_hca->log_num_eqs = profile[i].log_num; + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); break; case MLX4_RES_DMPT: dev->caps.num_mpts = profile[i].num; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6e27fa99e8b9..6a8f002b8ed3 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -64,6 +64,7 @@ enum { MLX4_MAX_NUM_PF = 16, MLX4_MAX_NUM_VF = 64, MLX4_MFUNC_MAX = 80, + MLX4_MAX_EQ_NUM = 1024, MLX4_MFUNC_EQ_NUM = 4, MLX4_MFUNC_MAX_EQES = 8, MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) @@ -239,6 +240,10 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) return (major << 32) | (minor << 16) | subminor; } +struct mlx4_phys_caps { + u32 num_phys_eqs; +}; + struct mlx4_caps { u64 fw_ver; u32 function; @@ -499,6 +504,7 @@ struct mlx4_dev { unsigned long flags; unsigned long num_slaves; struct mlx4_caps caps; + struct mlx4_phys_caps phys_caps; struct radix_tree_root qp_table_tree; u8 rev_id; char board_id[MLX4_BOARD_ID_LEN]; From 13bf58b7604d9adfebb8b7c95e6cfb31ec17c699 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Wed, 30 May 2012 09:14:52 +0000 Subject: [PATCH 03/20] net/mlx4_en: Fix improper use of "port" parameter in mlx4_en_event Port is used as an array index before we know if that is proper. For example, in the catas event case, port is zero; however, the port index should lie in the range (1..2). Fix this by using 'port' only in the events where it is of interest. Test for port out of range in the default (unhandled event) case, and do not output a message if it is not an ethernet port. Signed-off-by: Jack Morgenstein Reviewed-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_main.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 988b2424e1c6..69ba57270481 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; struct mlx4_en_priv *priv; - if (!mdev->pndev[port]) - return; - - priv = netdev_priv(mdev->pndev[port]); switch (event) { case MLX4_DEV_EVENT_PORT_UP: case MLX4_DEV_EVENT_PORT_DOWN: + if (!mdev->pndev[port]) + return; + priv = netdev_priv(mdev->pndev[port]); /* To prevent races, we poll the link state in a separate task rather than changing it here */ priv->link_state = event; @@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, break; default: - mlx4_warn(mdev, "Unhandled event: %d\n", event); + if (port < 1 || port > dev->caps.num_ports || + !mdev->pndev[port]) + return; + mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port); } } From b91cb3ebcd5ef8db956b8caa486d780dc52b07f1 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Wed, 30 May 2012 09:14:53 +0000 Subject: [PATCH 04/20] net/mlx4_core: Fixes for VF / Guest startup flow - pass the following parameters: - firmware version (added QUERY_FW paravirtualization for that) - disable Blueflame on slaves. KVM disables write combining on guests, and we get better performance without BF in this case. (This requires QUERY_DEV_CAP paravirtualization, also in this commit) - max qp rdma as destination - get rid of a chunk of "if (0)" dead code Signed-off-by: Jack Morgenstein Reviewed-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 4 +- drivers/net/ethernet/mellanox/mlx4/fw.c | 46 +++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 17 +++------ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 10 +++++ 4 files changed, 63 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 1bcead1fa2f6..842c8ce9494e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = { .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = NULL + .wrapper = mlx4_QUERY_FW_wrapper }, { .opcode = MLX4_CMD_QUERY_HCA, @@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = { .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = NULL + .wrapper = mlx4_QUERY_DEV_CAP_wrapper }, { .opcode = MLX4_CMD_QUERY_FUNC_CAP, diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 68f5cd6cb3c7..7f2a4a659627 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -669,6 +669,28 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) return err; } +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err = 0; + u8 field; + + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* For guests, report Blueflame disabled */ + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET); + field &= 0x7f; + MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); + + return 0; +} + int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -860,6 +882,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) ((fw_ver & 0xffff0000ull) >> 16) | ((fw_ver & 0x0000ffffull) << 16); + if (mlx4_is_slave(dev)) + goto out; + MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); dev->caps.function = lg; @@ -927,6 +952,27 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev) return err; } +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + u8 *outbuf; + int err; + + outbuf = outbox->buf; + err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW, + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); + if (err) + return err; + + /* for slaves, zero out everything except FW version */ + outbuf[0] = outbuf[1] = 0; + memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8); + return 0; +} + static void get_board_id(void *vsd, char *board_id) { int i; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2fe9fe5f7cfb..7f71be0ca5f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -430,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; memset(&dev_cap, 0, sizeof(dev_cap)); + dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); return err; } + err = mlx4_QUERY_FW(dev); + if (err) + mlx4_err(dev, "QUERY_FW command failed: could not get FW 
version.\n"); + page_size = ~dev->caps.page_size_cap + 1; mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); if (page_size > PAGE_SIZE) { @@ -499,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) return -ENODEV; } -#if 0 - mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux); - mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n", - dev->caps.num_uars, dev->caps.reserved_uars, - dev->caps.uar_page_size * dev->caps.num_uars, - pci_resource_len(dev->pdev, 2)); - mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs, - dev->caps.reserved_eqs); - mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n", - dev->caps.num_pds, dev->caps.reserved_pds, - dev->caps.slave_pd_shift, dev->caps.pd_base); -#endif return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 86b6e5a2fabf..e5d20220762c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev); void mlx4_free_resource_tracker(struct mlx4_dev *dev, enum mlx4_res_tracker_free_type type); +int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, From 6230bb234dd17b4c92518e848368ce03f9802323 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Wed, 30 May 2012 09:14:54 +0000 Subject: [PATCH 05/20] net/mlx4_core: Check port out-of-range before using in mlx4_slave_cap The range check was performed after using the port number. Reverse this to prevent a potential array overflow. Signed-off-by: Jack Morgenstein Reviewed-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 7f71be0ca5f2..ee6f4fe00837 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) dev->caps.num_mgms = 0; dev->caps.num_amgms = 0; - for (i = 1; i <= dev->caps.num_ports; ++i) - dev->caps.port_mask[i] = dev->caps.port_type[i]; - if (dev->caps.num_ports > MLX4_MAX_PORTS) { mlx4_err(dev, "HCA has %d ports, but we only support %d, " "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); return -ENODEV; } + for (i = 1; i <= dev->caps.num_ports; ++i) + dev->caps.port_mask[i] = dev->caps.port_type[i]; + if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > pci_resource_len(dev->pdev, 2)) { From 401453a31ea8192eb94f9337f5608de907681bfb Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Wed, 30 May 2012 09:14:55 +0000 Subject: [PATCH 06/20] net/mlx4_core: Fix obscure mlx4_cmd_box parameter in QUERY_DEV_CAP The "!mlx4_is_slave" is totally confusing. Fix with constant MLX4_CMD_NATIVE, which is the intended behavior. Signed-off-by: Jack Morgenstein Reviewed-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 7f2a4a659627..9c83bb8151ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) outbox = mailbox->buf; err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, - MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev)); + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) goto out; @@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) for (i = 1; i <= dev_cap->num_ports; ++i) { err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, - MLX4_CMD_TIME_CLASS_B, - !mlx4_is_slave(dev)); + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); if (err) goto out; From 0cfd32b736ae0c36b42697584811042726c07cba Mon Sep 17 00:00:00 2001 From: Hiroaki SHIMODA Date: Wed, 30 May 2012 12:24:39 +0000 Subject: [PATCH 07/20] bql: Fix POSDIFF() to integer overflow aware. POSDIFF() fails to take into account integer overflow case. Signed-off-by: Hiroaki SHIMODA Cc: Tom Herbert Cc: Eric Dumazet Cc: Denys Fedoryshchenko Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- lib/dynamic_queue_limits.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index 6ab4587d052b..c87eb76f2fd4 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -10,7 +10,7 @@ #include #include -#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0) +#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) /* Records completed count and recalculates the queue limit */ void dql_completed(struct dql *dql, unsigned int count) From 25426b794efdc70dde7fd3134dc56fac3e7d562d Mon Sep 17 00:00:00 2001 From: Hiroaki SHIMODA Date: Wed, 30 May 2012 12:25:19 +0000 Subject: [PATCH 08/20] bql: Avoid unneeded limit decrement. 
When the pattern below is observed:

    TIME
         dql_queued()           dql_completed()
    |    a) initial state
    |
    |    b) X bytes queued
    V
         c) Y bytes queued
                                d) X bytes completed
         e) Z bytes queued
                                f) Y bytes completed

a) dql->limit already has some value and there is no in-flight packet.
b) X bytes queued.
c) Y bytes queued, exceeding the limit.
d) X bytes completed; dql->prev_ovlimit is set and dql->prev_num_queued is set to Y.
e) Z bytes queued.
f) Y bytes completed; inprogress and prev_inprogress are true.

At f), according to the comment, all_prev_completed becomes true and the limit should be increased. But POSDIFF() ignores the (completed == dql->prev_num_queued) case, so the limit is decreased.

Signed-off-by: Hiroaki SHIMODA
Cc: Tom Herbert
Cc: Eric Dumazet
Cc: Denys Fedoryshchenko
Acked-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 lib/dynamic_queue_limits.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index c87eb76f2fd4..0fafa77f4036 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -11,12 +11,14 @@
 #include

 #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)

 /* Records completed count and recalculates the queue limit */
 void dql_completed(struct dql *dql, unsigned int count)
 {
 	unsigned int inprogress, prev_inprogress, limit;
-	unsigned int ovlimit, all_prev_completed, completed;
+	unsigned int ovlimit, completed;
+	bool all_prev_completed;

 	/* Can't complete more than what's in queue */
 	BUG_ON(count > dql->num_queued - dql->num_completed);
@@ -26,7 +28,7 @@ void dql_completed(struct dql *dql, unsigned int count)
 	ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
 	inprogress = dql->num_queued - completed;
 	prev_inprogress = dql->prev_num_queued - dql->num_completed;
-	all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
+	all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);

 	if ((ovlimit && !inprogress) ||
 	    (dql->prev_ovlimit && all_prev_completed)) {

From 914bec1011a25f65cdc94988a6f974bfb9a3c10d Mon Sep 17 00:00:00 2001
From: Hiroaki SHIMODA
Date: Wed, 30 May 2012 12:25:37 +0000
Subject: [PATCH 09/20] bql: Avoid possible inconsistent calculation.

dql->num_queued could change while processing dql_completed(). To provide a consistent calculation, add an on-stack variable.

Signed-off-by: Hiroaki SHIMODA
Cc: Tom Herbert
Cc: Eric Dumazet
Cc: Denys Fedoryshchenko
Signed-off-by: Eric Dumazet
Signed-off-by: David S.
Miller --- lib/dynamic_queue_limits.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index 0fafa77f4036..0777c5a45fa0 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -17,16 +17,18 @@ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; - unsigned int ovlimit, completed; + unsigned int ovlimit, completed, num_queued; bool all_prev_completed; + num_queued = ACCESS_ONCE(dql->num_queued); + /* Can't complete more than what's in queue */ - BUG_ON(count > dql->num_queued - dql->num_completed); + BUG_ON(count > num_queued - dql->num_completed); completed = dql->num_completed + count; limit = dql->limit; - ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit); - inprogress = dql->num_queued - completed; + ovlimit = POSDIFF(num_queued - dql->num_completed, limit); + inprogress = num_queued - completed; prev_inprogress = dql->prev_num_queued - dql->num_completed; all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued); @@ -106,7 +108,7 @@ void dql_completed(struct dql *dql, unsigned int count) dql->prev_ovlimit = ovlimit; dql->prev_last_obj_cnt = dql->last_obj_cnt; dql->num_completed = completed; - dql->prev_num_queued = dql->num_queued; + dql->prev_num_queued = num_queued; } EXPORT_SYMBOL(dql_completed); From cc9b17ad29ecaa20bfe426a8d4dbfb94b13ff1cc Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 30 May 2012 21:18:10 +0000 Subject: [PATCH 10/20] net: sock: validate data_len before allocating skb in sock_alloc_send_pskb() We need to validate the number of pages consumed by data_len, otherwise frags array could be overflowed by userspace. So this patch validate data_len and return -EMSGSIZE when data_len may occupies more frags than MAX_SKB_FRAGS. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- net/core/sock.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 653f8c0aedc5..9e5b71fda6ec 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, gfp_t gfp_mask; long timeo; int err; + int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + + err = -EMSGSIZE; + if (npages > MAX_SKB_FRAGS) + goto failure; gfp_mask = sk->sk_allocation; if (gfp_mask & __GFP_WAIT) @@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { skb = alloc_skb(header_len, gfp_mask); if (skb) { - int npages; int i; /* No pages, we're done... */ if (!data_len) break; - npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; skb->truesize += data_len; skb_shinfo(skb)->nr_frags = npages; for (i = 0; i < npages; i++) { From 20e2a86485967c385d7c7befc1646e4d1d39362e Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Fri, 1 Jun 2012 05:54:56 +0000 Subject: [PATCH 11/20] cipso: handle CIPSO options correctly when NetLabel is disabled When NetLabel is not enabled, e.g. CONFIG_NETLABEL=n, and the system receives a CIPSO tagged packet it is dropped (cipso_v4_validate() returns non-zero). In most cases this is the correct and desired behavior, however, in the case where we are simply forwarding the traffic, e.g. acting as a network bridge, this becomes a problem. 
This patch fixes the forwarding problem by providing the basic CIPSO validation code directly in ip_options_compile() without the need for the NetLabel or CIPSO code. The new validation code can not perform any of the CIPSO option label/value verification that cipso_v4_validate() does, but it can verify the basic CIPSO option format.

The behavior when NetLabel is enabled is unchanged.

Signed-off-by: Paul Moore
Signed-off-by: David S. Miller
---
 include/net/cipso_ipv4.h | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 9808877c2ab9..a7a683e30b64 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include

 /* known doi values */
 #define CIPSO_V4_DOI_UNKNOWN 0x00000000
@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
 static inline int cipso_v4_validate(const struct sk_buff *skb,
 				    unsigned char **option)
 {
-	return -ENOSYS;
+	unsigned char *opt = *option;
+	unsigned char err_offset = 0;
+	u8 opt_len = opt[1];
+	u8 opt_iter;
+
+	if (opt_len < 8) {
+		err_offset = 1;
+		goto out;
+	}
+
+	if (get_unaligned_be32(&opt[2]) == 0) {
+		err_offset = 2;
+		goto out;
+	}
+
+	for (opt_iter = 6; opt_iter < opt_len;) {
+		if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+			err_offset = opt_iter + 1;
+			goto out;
+		}
+		opt_iter += opt[opt_iter + 1];
+	}
+
+out:
+	*option = opt + err_offset;
+	return err_offset;
+
 }

 #endif /* CONFIG_NETLABEL */

From b01af4579ec41f48e9b9c774e70bd6474ad210db Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Thu, 31 May 2012 18:19:39 +0000
Subject: [PATCH 12/20] 8139cp: set ring address before enabling receiver

Currently, we enable the receiver before setting the ring address, which could lead the card to DMA into unexpected areas. Solve this by setting the ring address before enabling the receiver.

Note: I found and tested this in qemu, as I didn't have an 8139cp card in hand, so please review it carefully.

Signed-off-by: Jason Wang
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/realtek/8139cp.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5eef290997f9..7f08779acba4 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
 	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
 	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

+	cpw32_f(HiTxRingAddr, 0);
+	cpw32_f(HiTxRingAddr + 4, 0);
+
+	ring_dma = cp->ring_dma;
+	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
 	cp_start_hw(cp);
 	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

@@ -992,17 +1003,6 @@
 	cpw8(Config5, cpr8(Config5) & PMEStatus);

-	cpw32_f(HiTxRingAddr, 0);
-	cpw32_f(HiTxRingAddr + 4, 0);
-
-	ring_dma = cp->ring_dma;
-	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
 	cpw16(MultiIntr, 0);

 	cpw8_f(Cfg9346, Cfg9346_Lock);

From 0bc777bca480357941418952cf228484f5485daf Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Thu, 31 May 2012 18:19:48 +0000
Subject: [PATCH 13/20] 8139cp/8139too: terminate the eeprom access with the right opmode

Currently, we terminate the eeprom access by clearing the CS with:

	RTL_W8 (Cfg9346, ~EE_CS);
or
	writeb (~EE_CS, ee_addr);

This would leave the eeprom in the "Config. Register Write Enable" state, which is not expected, as the highest two bits are then set to 0x11 (the expected mode is "Normal" (0x00)). Solve this by writing 0x0 instead of ~EE_CS when terminating the eeprom access.

Signed-off-by: Jason Wang
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/realtek/8139cp.c  | 2 +-
 drivers/net/ethernet/realtek/8139too.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 7f08779acba4..995d0cfc4c06 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)

 static void eeprom_cmd_end(void __iomem *ee_addr)
 {
-	writeb (~EE_CS, ee_addr);
+	writeb(0, ee_addr);
 	eeprom_delay ();
 }

diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 03df076ed596..1d83565cc6af 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
 	}

 	/* Terminate the EEPROM access. */
-	RTL_W8 (Cfg9346, ~EE_CS);
+	RTL_W8(Cfg9346, 0);
 	eeprom_delay ();

 	return retval;

From 7433819a1eefd4e74711fffd6d54e30a644ef240 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Thu, 31 May 2012 21:00:26 +0000
Subject: [PATCH 14/20] tcp: do not create inetpeer on SYNACK message

Another problem during a SYNFLOOD/DDoS attack is the inetpeer cache getting larger and larger, using lots of memory and cpu time.
tcp_v4_send_synack() ->inet_csk_route_req() ->ip_route_output_flow() ->rt_set_nexthop() ->rt_init_metrics() ->inet_getpeer( create = true) This is a side effect of commit a4daad6b09230 (net: Pre-COW metrics for TCP) added in 2.6.39 Possible solution : Instruct inet_csk_route_req() to remove FLOWI_FLAG_PRECOW_METRICS Before patch : # grep peer /proc/slabinfo inet_peer_cache 4175430 4175430 192 42 2 : tunables 0 0 0 : slabdata 99415 99415 0 Samples: 41K of event 'cycles', Event count (approx.): 30716565122 + 20,24% ksoftirqd/0 [kernel.kallsyms] [k] inet_getpeer + 8,19% ksoftirqd/0 [kernel.kallsyms] [k] peer_avl_rebalance.isra.1 + 4,81% ksoftirqd/0 [kernel.kallsyms] [k] sha_transform + 3,64% ksoftirqd/0 [kernel.kallsyms] [k] fib_table_lookup + 2,36% ksoftirqd/0 [ixgbe] [k] ixgbe_poll + 2,16% ksoftirqd/0 [kernel.kallsyms] [k] __ip_route_output_key + 2,11% ksoftirqd/0 [kernel.kallsyms] [k] kernel_map_pages + 2,11% ksoftirqd/0 [kernel.kallsyms] [k] ip_route_input_common + 2,01% ksoftirqd/0 [kernel.kallsyms] [k] __inet_lookup_established + 1,83% ksoftirqd/0 [kernel.kallsyms] [k] md5_transform + 1,75% ksoftirqd/0 [kernel.kallsyms] [k] check_leaf.isra.9 + 1,49% ksoftirqd/0 [kernel.kallsyms] [k] ipt_do_table + 1,46% ksoftirqd/0 [kernel.kallsyms] [k] hrtimer_interrupt + 1,45% ksoftirqd/0 [kernel.kallsyms] [k] kmem_cache_alloc + 1,29% ksoftirqd/0 [kernel.kallsyms] [k] inet_csk_search_req + 1,29% ksoftirqd/0 [kernel.kallsyms] [k] __netif_receive_skb + 1,16% ksoftirqd/0 [kernel.kallsyms] [k] copy_user_generic_string + 1,15% ksoftirqd/0 [kernel.kallsyms] [k] kmem_cache_free + 1,02% ksoftirqd/0 [kernel.kallsyms] [k] tcp_make_synack + 0,93% ksoftirqd/0 [kernel.kallsyms] [k] _raw_spin_lock_bh + 0,87% ksoftirqd/0 [kernel.kallsyms] [k] __call_rcu + 0,84% ksoftirqd/0 [kernel.kallsyms] [k] rt_garbage_collect + 0,84% ksoftirqd/0 [kernel.kallsyms] [k] fib_rules_lookup Signed-off-by: Eric Dumazet Cc: Hans Schillstrom Cc: Jesper Dangaard Brouer Cc: Neal Cardwell Cc: Tom Herbert Signed-off-by: David S. Miller --- net/ipv4/inet_connection_sock.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 95e61596e605..f9ee7417f6a0 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk, flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, - sk->sk_protocol, inet_sk_flowi_flags(sk), + sk->sk_protocol, + inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS, (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); security_req_classify_flow(req, flowi4_to_flowi(fl4)); From fff3269907897ee91406ece125795f53e722677e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 1 Jun 2012 01:47:50 +0000 Subject: [PATCH 15/20] tcp: reflect SYN queue_mapping into SYNACK packets While testing how linux behaves on SYNFLOOD attack on multiqueue device (ixgbe), I found that SYNACK messages were dropped at Qdisc level because we send them all on a single queue. Obvious choice is to reflect incoming SYN packet @queue_mapping to SYNACK packet. Under stress, my machine could only send 25.000 SYNACK per second (for 200.000 incoming SYN per second). NIC : ixgbe with 16 rx/tx queues. After patch, not a single SYNACK is dropped. Signed-off-by: Eric Dumazet Cc: Hans Schillstrom Cc: Jesper Dangaard Brouer Cc: Neal Cardwell Cc: Tom Herbert Signed-off-by: David S. 
Miller --- net/ipv4/tcp_ipv4.c | 9 ++++++--- net/ipv6/tcp_ipv6.c | 9 ++++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a43b87dfe800..c8d28c433b2b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, */ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, - struct request_values *rvp) + struct request_values *rvp, + u16 queue_mapping) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; @@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, if (skb) { __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); + skb_set_queue_mapping(skb, queue_mapping); err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); @@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, struct request_values *rvp) { TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); - return tcp_v4_send_synack(sk, NULL, req, rvp); + return tcp_v4_send_synack(sk, NULL, req, rvp, 0); } /* @@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_rsk(req)->snt_synack = tcp_time_stamp; if (tcp_v4_send_synack(sk, dst, req, - (struct request_values *)&tmp_ext) || + (struct request_values *)&tmp_ext, + skb_get_queue_mapping(skb)) || want_cookie) goto drop_and_free; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 554d5999abc4..3a9aec29581a 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -476,7 +476,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, - struct request_values *rvp) + struct request_values *rvp, + u16 queue_mapping) { struct inet6_request_sock *treq = inet6_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); @@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); fl6.daddr = treq->rmt_addr; + skb_set_queue_mapping(skb, queue_mapping); err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); err = net_xmit_eval(err); } @@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, struct request_values *rvp) { TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); - return tcp_v6_send_synack(sk, req, rvp); + return tcp_v6_send_synack(sk, req, rvp, 0); } static void tcp_v6_reqsk_destructor(struct request_sock *req) @@ -1213,7 +1215,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) security_inet_conn_request(sk, skb, req); if (tcp_v6_send_synack(sk, req, - (struct request_values *)&tmp_ext) || + (struct request_values *)&tmp_ext, + skb_get_queue_mapping(skb)) || want_cookie) goto drop_and_free; From ad1be8d345416a794dea39761a374032aa471a76 Mon Sep 17 00:00:00 2001 From: Devendra Naga Date: Thu, 31 May 2012 01:51:20 +0000 Subject: [PATCH 16/20] r8169: call netif_napi_del at errpaths and at driver unload when register_netdev fails, the init'ed NAPIs by netif_napi_add must be deleted with netif_napi_del, and also when driver unloads, it should delete the NAPI before unregistering netdevice using unregister_netdev. Signed-off-by: Devendra Naga Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 00b4f56a671c..9757ce3543a0 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6345,6 +6345,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev) cancel_work_sync(&tp->wk.work); + netif_napi_del(&tp->napi); + unregister_netdev(dev); rtl_release_firmware(tp); @@ -6668,6 +6670,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; err_out_msi_4: + netif_napi_del(&tp->napi); rtl_disable_msi(pdev, tp); iounmap(ioaddr); err_out_free_res_3: From 281a8f2462fe3cd2395902955205621e2c519464 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 15 May 2012 09:18:55 +0000 Subject: [PATCH 17/20] e1000: look into the page instead of skb->data for e1000_tbi_adjust_stats() This is another fixup where the data is not transfered into buffer addressed by skb->data but into a page. Signed-off-by: Sebastian Andrzej Siewior Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000/e1000_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 95731c841044..7483ca0a6282 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, spin_lock_irqsave(&adapter->stats_lock, irq_flags); e1000_tbi_adjust_stats(hw, &adapter->stats, - length, skb->data); + length, mapped); spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); length--; From 6d7407bfba0b4eb21d843ff1f9e9c86156e502b2 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 10 May 2012 02:51:17 +0000 Subject: [PATCH 18/20] e1000e: fix Rapid Start Technology support for i217 The definition of I217_PROXY_CTRL must use the BM_PHY_REG() macro instead of the PHY_REG() macro for PHY page 800 register 70 since it is for a PHY register greater than the maximum allowed by the latter macro, and fix a typo setting the I217_MEMPWR register in e1000_suspend_workarounds_ich8lan. Also for clarity, rename a few defines as bit definitions instead of masks. 
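[Editor's note: a minimal standalone illustration (not part of this patch) of why PHY page 800 register 70 needs the BM_PHY_REG() encoding. The macro and constants below are simplified stand-ins, not the real e1000e definitions: conventional MDIO register addresses are only 5 bits wide (0-31), so an encoding that masks the register number to 5 bits silently aliases register 70.]

	#include <stdio.h>

	/* Hypothetical stand-in for a "page/register" macro that keeps only the
	 * low 5 bits of the register number (the classic MDIO address width).
	 * A wide BM_-style encoding is needed precisely because registers
	 * above 31 do not fit in those 5 bits. */
	#define DEMO_PHY_PAGE_SHIFT	5
	#define DEMO_MAX_PHY_REG_ADDR	0x1F

	#define DEMO_PHY_REG(page, reg) \
		(((page) << DEMO_PHY_PAGE_SHIFT) | ((reg) & DEMO_MAX_PHY_REG_ADDR))

	int main(void)
	{
		/* 70 & 0x1F == 6: register 70 on page 800 collides with register 6. */
		printf("DEMO_PHY_REG(800, 70) == DEMO_PHY_REG(800, 6) ? %s\n",
		       DEMO_PHY_REG(800, 70) == DEMO_PHY_REG(800, 6) ? "yes" : "no");
		return 0;
	}
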
Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index bbf70ba367da..238ab2f8a5e7 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -165,14 +165,14 @@ #define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ /* Intel Rapid Start Technology Support */ -#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) #define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 #define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) -#define I217_SxCTRL_MASK 0x1000 +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 #define I217_CGFREG PHY_REG(772, 29) -#define I217_CGFREG_MASK 0x0002 +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) -#define I217_MEMPWR_MASK 0x0010 +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C @@ -4089,12 +4089,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * power good. */ e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); - phy_reg |= I217_SxCTRL_MASK; + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); /* Disable the SMB release on LCD reset. */ e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); - phy_reg &= ~I217_MEMPWR; + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); } @@ -4103,7 +4103,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) * Support */ e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); - phy_reg |= I217_CGFREG_MASK; + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: @@ -4176,7 +4176,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); if (ret_val) goto release; - phy_reg |= I217_MEMPWR_MASK; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); /* Disable Proxy */ @@ -4186,7 +4186,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); if (ret_val) goto release; - phy_reg &= ~I217_CGFREG_MASK; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; e1e_wphy_locked(hw, I217_CGFREG, phy_reg); release: if (ret_val) From b1ff4f96fd1c63890d78d8939c6e0f2b44ce3113 Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Fri, 1 Jun 2012 10:29:08 +0000 Subject: [PATCH 19/20] mcs7830: Implement link state detection Add .status callback that detects link state changes. Tested with MCS7832CV-AA chip (9710:7830, identified as rev.C by the driver). Fixes https://bugzilla.kernel.org/show_bug.cgi?id=28532 Signed-off-by: Ondrej Zary Signed-off-by: David S. 
Miller --- drivers/net/usb/mcs7830.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index add1064f755d..03c2d8d653df 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return skb->len > 0; } +static void mcs7830_status(struct usbnet *dev, struct urb *urb) +{ + u8 *buf = urb->transfer_buffer; + bool link; + + if (urb->actual_length < 16) + return; + + link = !(buf[1] & 0x20); + if (netif_carrier_ok(dev->net) != link) { + if (link) { + netif_carrier_on(dev->net); + usbnet_defer_kevent(dev, EVENT_LINK_RESET); + } else + netif_carrier_off(dev->net); + netdev_dbg(dev->net, "Link Status is: %d\n", link); + } +} + static const struct driver_info moschip_info = { .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, - .flags = FLAG_ETHER, + .flags = FLAG_ETHER | FLAG_LINK_INTR, + .status = mcs7830_status, .in = 1, .out = 2, }; @@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = { .description = "Sitecom LN-30 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, - .flags = FLAG_ETHER, + .flags = FLAG_ETHER | FLAG_LINK_INTR, + .status = mcs7830_status, .in = 1, .out = 2, }; From 9ca3cc6f3026946ba655e863ca2096339e667639 Mon Sep 17 00:00:00 2001 From: Stephan Gatzka Date: Sat, 2 Jun 2012 03:04:06 +0000 Subject: [PATCH 20/20] fec_mpc52xx: fix timestamp filtering skb_defer_rx_timestamp was called with a freshly allocated skb but must be called with rskb instead. Signed-off-by: Stephan Gatzka Cc: stable Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_mpc52xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 97f947b3d94a..2933d08b036e 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id) length = status & BCOM_FEC_RX_BD_LEN_MASK; skb_put(rskb, length - 4); /* length without CRC32 */ rskb->protocol = eth_type_trans(rskb, dev); - if (!skb_defer_rx_timestamp(skb)) + if (!skb_defer_rx_timestamp(rskb)) netif_rx(rskb); spin_lock(&priv->lock);
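
[Editor's note: the series ends here. As a standalone aside (not part of any patch above), the sketch below shows why the subtract-then-sign-check idiom used by POSDIFF()/AFTER_EQ() in the BQL fixes keeps working when the unsigned byte counters wrap around, whereas a plain comparison does not; the counter values are illustrative.]

	#include <limits.h>
	#include <stdio.h>

	/* Same idiom as in lib/dynamic_queue_limits.c: subtract first (unsigned
	 * arithmetic wraps modulo 2^32), then interpret the difference as signed
	 * to decide which counter is ahead. */
	#define POSDIFF(A, B)  ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
	#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)

	int main(void)
	{
		/* The queued counter has wrapped past UINT_MAX (true count is
		 * UINT_MAX + 6, stored as 5); completion is 15 bytes behind it. */
		unsigned int num_queued = 5;
		unsigned int num_completed = UINT_MAX - 9;

		/* A plain comparison gets the ordering backwards after the wrap... */
		printf("naive queued > completed: %d\n", num_queued > num_completed);

		/* ...while the wrap-aware macros still see 15 bytes outstanding and
		 * report that completion has not yet caught up with queueing. */
		printf("POSDIFF(queued, completed) = %u\n",
		       POSDIFF(num_queued, num_completed));
		printf("AFTER_EQ(completed, queued) = %d\n",
		       AFTER_EQ(num_completed, num_queued));
		return 0;
	}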