Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (27 commits)
  vlan: allow nested vlan_do_receive()
  ipv6: fix route lookup in addrconf_prefix_rcv()
  bonding: eliminate bond_close race conditions
  qlcnic: fix beacon and LED test.
  qlcnic: Updated License file
  qlcnic: updated reset sequence
  qlcnic: reset loopback mode if promiscuous mode setting fails.
  qlcnic: skip IDC ack check in fw reset path.
  i825xx: Fix incorrect dependency for BVME6000_NET
  ipv6: fix route error binding peer in func icmp6_dst_alloc
  ipv6: fix error propagation in ip6_ufo_append_data()
  stmmac: update normal descriptor structure (v2)
  stmmac: fix NULL pointer dereference in capabilities fixup (v2)
  stmmac: fix a bug while checking the HW cap reg (v2)
  be2net: Changing MAC Address of a VF was broken.
  be2net: Refactored be_cmds.c file.
  bnx2x: update driver version to 1.70.30-0
  bnx2x: use FW 7.0.29.0
  bnx2x: Enable changing speed when port type is PORT_DA
  bnx2x: Fix 54618se LED behavior
  ...
Linus Torvalds 2011-10-31 15:22:44 -07:00
commit 1a4ceab195
39 changed files with 634 additions and 567 deletions


@@ -1,61 +1,22 @@
-Copyright (c) 2009-2010 QLogic Corporation
+Copyright (c) 2009-2011 QLogic Corporation
 QLogic Linux qlcnic NIC Driver
-This program includes a device driver for Linux 2.6 that may be
-distributed with QLogic hardware specific firmware binary file.
 You may modify and redistribute the device driver code under the
 GNU General Public License (a copy of which is attached hereto as
 Exhibit A) published by the Free Software Foundation (version 2).
-You may redistribute the hardware specific firmware binary file
-under the following terms:
-1. Redistribution of source code (only if applicable),
-   must retain the above copyright notice, this list of
-   conditions and the following disclaimer.
-2. Redistribution in binary form must reproduce the above
-   copyright notice, this list of conditions and the
-   following disclaimer in the documentation and/or other
-   materials provided with the distribution.
-3. The name of QLogic Corporation may not be used to
-   endorse or promote products derived from this software
-   without specific prior written permission
-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
-THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
-CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
-OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
-TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
-ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
-COMBINATION WITH THIS PROGRAM.
 EXHIBIT A
 GNU GENERAL PUBLIC LICENSE
 Version 2, June 1991
 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.
 Preamble
 The licenses for most software are designed to take away your
 freedom to share and change it. By contrast, the GNU General Public
@@ -105,7 +66,7 @@ patent must be licensed for everyone's free use or not licensed at all.
 The precise terms and conditions for copying, distribution and
 modification follow.
 GNU GENERAL PUBLIC LICENSE
 TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
 0. This License applies to any program or other work which contains
@@ -304,7 +265,7 @@ make exceptions for this. Our decision will be guided by the two goals
 of preserving the free status of all derivatives of our free software and
 of promoting the sharing and reuse of software generally.
 NO WARRANTY
 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
 FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN


@@ -2110,9 +2110,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 read_lock(&bond->lock);
-if (bond->kill_timers)
-goto out;
 //check if there are any slaves
 if (bond->slave_cnt == 0)
 goto re_arm;
@@ -2161,9 +2158,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 }
 re_arm:
-if (!bond->kill_timers)
-queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
-out:
+queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
 read_unlock(&bond->lock);
 }


@@ -1343,10 +1343,6 @@ void bond_alb_monitor(struct work_struct *work)
 read_lock(&bond->lock);
-if (bond->kill_timers) {
-goto out;
-}
 if (bond->slave_cnt == 0) {
 bond_info->tx_rebalance_counter = 0;
 bond_info->lp_counter = 0;
@@ -1401,10 +1397,13 @@
 /*
 * dev_set_promiscuity requires rtnl and
- * nothing else.
+ * nothing else. Avoid race with bond_close.
 */
 read_unlock(&bond->lock);
-rtnl_lock();
+if (!rtnl_trylock()) {
+read_lock(&bond->lock);
+goto re_arm;
+}
 bond_info->rlb_promisc_timeout_counter = 0;
@@ -1440,9 +1439,8 @@
 }
 re_arm:
-if (!bond->kill_timers)
-queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
-out:
+queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
 read_unlock(&bond->lock);
 }


@@ -773,9 +773,6 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 read_lock(&bond->lock);
-if (bond->kill_timers)
-goto out;
 /* rejoin all groups on bond device */
 __bond_resend_igmp_join_requests(bond->dev);
@@ -789,9 +786,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
 __bond_resend_igmp_join_requests(vlan_dev);
 }
-if ((--bond->igmp_retrans > 0) && !bond->kill_timers)
+if (--bond->igmp_retrans > 0)
 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-out:
 read_unlock(&bond->lock);
 }
@@ -2517,10 +2514,11 @@ void bond_mii_monitor(struct work_struct *work)
 struct bonding *bond = container_of(work, struct bonding,
 mii_work.work);
 bool should_notify_peers = false;
+unsigned long delay;
 read_lock(&bond->lock);
-if (bond->kill_timers)
-goto out;
+delay = msecs_to_jiffies(bond->params.miimon);
 if (bond->slave_cnt == 0)
 goto re_arm;
@@ -2529,7 +2527,15 @@ void bond_mii_monitor(struct work_struct *work)
 if (bond_miimon_inspect(bond)) {
 read_unlock(&bond->lock);
-rtnl_lock();
+/* Race avoidance with bond_close cancel of workqueue */
+if (!rtnl_trylock()) {
+read_lock(&bond->lock);
+delay = 1;
+should_notify_peers = false;
+goto re_arm;
+}
 read_lock(&bond->lock);
 bond_miimon_commit(bond);
@@ -2540,14 +2546,18 @@ void bond_mii_monitor(struct work_struct *work)
 }
 re_arm:
-if (bond->params.miimon && !bond->kill_timers)
-queue_delayed_work(bond->wq, &bond->mii_work,
-msecs_to_jiffies(bond->params.miimon));
-out:
+if (bond->params.miimon)
+queue_delayed_work(bond->wq, &bond->mii_work, delay);
 read_unlock(&bond->lock);
 if (should_notify_peers) {
-rtnl_lock();
+if (!rtnl_trylock()) {
+read_lock(&bond->lock);
+bond->send_peer_notif++;
+read_unlock(&bond->lock);
+return;
+}
 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
 rtnl_unlock();
 }
@@ -2789,9 +2799,6 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
-if (bond->kill_timers)
-goto out;
 if (bond->slave_cnt == 0)
 goto re_arm;
@@ -2888,9 +2895,9 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
 }
 re_arm:
-if (bond->params.arp_interval && !bond->kill_timers)
+if (bond->params.arp_interval)
 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
-out:
 read_unlock(&bond->lock);
 }
@@ -3131,9 +3138,6 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 read_lock(&bond->lock);
-if (bond->kill_timers)
-goto out;
 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 if (bond->slave_cnt == 0)
@@ -3143,7 +3147,15 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
 read_unlock(&bond->lock);
-rtnl_lock();
+/* Race avoidance with bond_close flush of workqueue */
+if (!rtnl_trylock()) {
+read_lock(&bond->lock);
+delta_in_ticks = 1;
+should_notify_peers = false;
+goto re_arm;
+}
 read_lock(&bond->lock);
 bond_ab_arp_commit(bond, delta_in_ticks);
@@ -3156,13 +3168,18 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 bond_ab_arp_probe(bond);
 re_arm:
-if (bond->params.arp_interval && !bond->kill_timers)
+if (bond->params.arp_interval)
 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
-out:
 read_unlock(&bond->lock);
 if (should_notify_peers) {
-rtnl_lock();
+if (!rtnl_trylock()) {
+read_lock(&bond->lock);
+bond->send_peer_notif++;
+read_unlock(&bond->lock);
+return;
+}
 netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
 rtnl_unlock();
 }
@@ -3424,8 +3441,6 @@ static int bond_open(struct net_device *bond_dev)
 struct slave *slave;
 int i;
-bond->kill_timers = 0;
 /* reset slave->backup and slave->inactive */
 read_lock(&bond->lock);
 if (bond->slave_cnt > 0) {
@@ -3494,33 +3509,30 @@ static int bond_close(struct net_device *bond_dev)
 bond->send_peer_notif = 0;
-/* signal timers not to re-arm */
-bond->kill_timers = 1;
 write_unlock_bh(&bond->lock);
 if (bond->params.miimon) { /* link check interval, in milliseconds. */
-cancel_delayed_work(&bond->mii_work);
+cancel_delayed_work_sync(&bond->mii_work);
 }
 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
-cancel_delayed_work(&bond->arp_work);
+cancel_delayed_work_sync(&bond->arp_work);
 }
 switch (bond->params.mode) {
 case BOND_MODE_8023AD:
-cancel_delayed_work(&bond->ad_work);
+cancel_delayed_work_sync(&bond->ad_work);
 break;
 case BOND_MODE_TLB:
 case BOND_MODE_ALB:
-cancel_delayed_work(&bond->alb_work);
+cancel_delayed_work_sync(&bond->alb_work);
 break;
 default:
 break;
 }
 if (delayed_work_pending(&bond->mcast_work))
-cancel_delayed_work(&bond->mcast_work);
+cancel_delayed_work_sync(&bond->mcast_work);
 if (bond_is_lb(bond)) {
 /* Must be called only after all
@@ -4367,26 +4379,22 @@ static void bond_setup(struct net_device *bond_dev)
 static void bond_work_cancel_all(struct bonding *bond)
 {
-write_lock_bh(&bond->lock);
-bond->kill_timers = 1;
-write_unlock_bh(&bond->lock);
 if (bond->params.miimon && delayed_work_pending(&bond->mii_work))
-cancel_delayed_work(&bond->mii_work);
+cancel_delayed_work_sync(&bond->mii_work);
 if (bond->params.arp_interval && delayed_work_pending(&bond->arp_work))
-cancel_delayed_work(&bond->arp_work);
+cancel_delayed_work_sync(&bond->arp_work);
 if (bond->params.mode == BOND_MODE_ALB &&
 delayed_work_pending(&bond->alb_work))
-cancel_delayed_work(&bond->alb_work);
+cancel_delayed_work_sync(&bond->alb_work);
 if (bond->params.mode == BOND_MODE_8023AD &&
 delayed_work_pending(&bond->ad_work))
-cancel_delayed_work(&bond->ad_work);
+cancel_delayed_work_sync(&bond->ad_work);
 if (delayed_work_pending(&bond->mcast_work))
-cancel_delayed_work(&bond->mcast_work);
+cancel_delayed_work_sync(&bond->mcast_work);
 }
 /*


@@ -222,7 +222,6 @@ struct bonding {
 struct slave *);
 rwlock_t lock;
 rwlock_t curr_slave_lock;
-s8 kill_timers;
 u8 send_peer_notif;
 s8 setup_by_slave;
 s8 igmp_retrans;
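
The bonding hunks above replace the kill_timers flag with cancel_delayed_work_sync() in the close paths and rtnl_trylock() plus a short re-arm in the work handlers, so a monitor never sleeps on RTNL while bond_close(), which already holds RTNL, waits for it in cancel_delayed_work_sync(). A minimal sketch of that pattern, using a hypothetical "foo" driver rather than the bonding source itself:

/* Sketch only: hypothetical driver, assumes standard workqueue/RTNL APIs. */
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>

struct foo_priv {
        struct delayed_work monitor;
        unsigned long interval;         /* re-arm period, in jiffies */
};

static void foo_monitor(struct work_struct *work)
{
        struct foo_priv *p = container_of(work, struct foo_priv, monitor.work);
        unsigned long delay = p->interval;

        /* Never block on RTNL here: foo_close() holds RTNL while it waits
         * in cancel_delayed_work_sync(), so sleeping in rtnl_lock() would
         * deadlock.  Try it, and on failure re-arm quickly and bail out. */
        if (!rtnl_trylock()) {
                delay = 1;
                goto re_arm;
        }
        /* ... work that needs RTNL ... */
        rtnl_unlock();

re_arm:
        /* cancel_delayed_work_sync() in foo_close() also handles a work
         * item that re-queues itself while being cancelled. */
        queue_delayed_work(system_wq, &p->monitor, delay);
}

static void foo_close(struct foo_priv *p)       /* called under RTNL */
{
        cancel_delayed_work_sync(&p->monitor);
}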


@@ -23,8 +23,8 @@
 * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.70.00-0"
-#define DRV_MODULE_RELDATE "2011/06/13"
+#define DRV_MODULE_VERSION "1.70.30-0"
+#define DRV_MODULE_RELDATE "2011/10/25"
 #define BNX2X_BC_VER 0x040200
 #if defined(CONFIG_DCB)


@@ -329,6 +329,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
 break;
 case PORT_FIBRE:
+case PORT_DA:
 if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
 break; /* no port change */


@@ -2550,7 +2550,7 @@ struct host_func_stats {
 #define BCM_5710_FW_MAJOR_VERSION 7
 #define BCM_5710_FW_MINOR_VERSION 0
-#define BCM_5710_FW_REVISION_VERSION 23
+#define BCM_5710_FW_REVISION_VERSION 29
 #define BCM_5710_FW_ENGINEERING_VERSION 0
 #define BCM_5710_FW_COMPILE_FLAGS 1


@@ -45,6 +45,9 @@
 #define MCPR_IMC_COMMAND_READ_OP 1
 #define MCPR_IMC_COMMAND_WRITE_OP 2
+/* LED Blink rate that will achieve ~15.9Hz */
+#define LED_BLINK_RATE_VAL_E3 354
+#define LED_BLINK_RATE_VAL_E1X_E2 480
 /***********************************************************/
 /* Shortcut definitions */
 /***********************************************************/
@@ -258,6 +261,7 @@
 #define MAX_PACKET_SIZE (9700)
 #define WC_UC_TIMEOUT 100
+#define MAX_KR_LINK_RETRY 4
 /**********************************************************/
 /* INTERFACE */
@@ -1490,6 +1494,18 @@ static void bnx2x_set_xumac_nig(struct link_params *params,
 NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
 }
+static void bnx2x_umac_disable(struct link_params *params)
+{
+u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+struct bnx2x *bp = params->bp;
+if (!(REG_RD(bp, MISC_REG_RESET_REG_2) &
+(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)))
+return;
+/* Disable RX and TX */
+REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0);
+}
 static void bnx2x_umac_enable(struct link_params *params,
 struct link_vars *vars, u8 lb)
 {
@@ -1599,8 +1615,9 @@ static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
 }
 /* Define the XMAC mode */
-static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
+static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
 {
+struct bnx2x *bp = params->bp;
 u32 is_port4mode = bnx2x_is_4_port_mode(bp);
 /**
@@ -1610,7 +1627,8 @@ static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
 * ports of the path
 **/
-if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
+if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
+(REG_RD(bp, MISC_REG_RESET_REG_2) &
 MISC_REGISTERS_RESET_REG_2_XMAC)) {
 DP(NETIF_MSG_LINK,
 "XMAC already out of reset in 4-port mode\n");
@@ -1677,10 +1695,6 @@ static void bnx2x_xmac_disable(struct link_params *params)
 (pfc_ctrl | (1<<1)));
 DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
 REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
-usleep_range(1000, 1000);
-bnx2x_set_xumac_nig(params, 0, 0);
-REG_WR(bp, xmac_base + XMAC_REG_CTRL,
-XMAC_CTRL_REG_SOFT_RESET);
 }
 }
@@ -1693,7 +1707,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
-bnx2x_xmac_init(bp, vars->line_speed);
+bnx2x_xmac_init(params, vars->line_speed);
 /*
 * This register determines on which events the MAC will assert
@@ -3575,6 +3589,11 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 u16 val16 = 0, lane, bam37 = 0;
 struct bnx2x *bp = params->bp;
 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+/* Disable Autoneg: re-enable it after adv is done. */
+bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
 /* Check adding advertisement for 1G KX */
 if (((vars->line_speed == SPEED_AUTO_NEG) &&
 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
@@ -3616,9 +3635,6 @@
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
 0x03f0);
-bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
-0x383f);
 /* Advertised speeds */
 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
@@ -3645,19 +3661,22 @@
 /* Advertise pause */
 bnx2x_ext_phy_set_pause(params, phy, vars);
-/* Enable Autoneg */
-bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
-MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
-/* Over 1G - AN local device user page 1 */
-bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
-MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DIGITAL5_MISC7, &val16);
 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+/* Over 1G - AN local device user page 1 */
+bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+/* Enable Autoneg */
+bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
 }
 static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
@@ -4126,6 +4145,85 @@ static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
 else
 return 0;
 }
+static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
+struct link_params *params)
+{
+u16 gp2_status_reg0, lane;
+struct bnx2x *bp = params->bp;
+lane = bnx2x_get_warpcore_lane(phy, params);
+bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_GP2_STATUS_GP_2_0,
+&gp2_status_reg0);
+return (gp2_status_reg0 >> (8+lane)) & 0x1;
+}
+static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
+struct link_params *params,
+struct link_vars *vars)
+{
+struct bnx2x *bp = params->bp;
+u32 serdes_net_if;
+u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
+u16 lane = bnx2x_get_warpcore_lane(phy, params);
+vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
+if (!vars->turn_to_run_wc_rt)
+return;
+/* return if there is no link partner */
+if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
+DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
+return;
+}
+if (vars->rx_tx_asic_rst) {
+serdes_net_if = (REG_RD(bp, params->shmem_base +
+offsetof(struct shmem_region, dev_info.
+port_hw_config[params->port].default_cfg)) &
+PORT_HW_CFG_NET_SERDES_IF_MASK);
+switch (serdes_net_if) {
+case PORT_HW_CFG_NET_SERDES_IF_KR:
+/* Do we get link yet? */
+bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
+&gp_status1);
+lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */
+/*10G KR*/
+lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
+DP(NETIF_MSG_LINK,
+"gp_status1 0x%x\n", gp_status1);
+if (lnkup_kr || lnkup) {
+vars->rx_tx_asic_rst = 0;
+DP(NETIF_MSG_LINK,
+"link up, rx_tx_asic_rst 0x%x\n",
+vars->rx_tx_asic_rst);
+} else {
+/*reset the lane to see if link comes up.*/
+bnx2x_warpcore_reset_lane(bp, phy, 1);
+bnx2x_warpcore_reset_lane(bp, phy, 0);
+/* restart Autoneg */
+bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
+vars->rx_tx_asic_rst--;
+DP(NETIF_MSG_LINK, "0x%x retry left\n",
+vars->rx_tx_asic_rst);
+}
+break;
+default:
+break;
+}
+} /*params->rx_tx_asic_rst*/
+}
 static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
 struct link_params *params,
@@ -5896,7 +5994,13 @@ int bnx2x_set_led(struct link_params *params,
 SHARED_HW_CFG_LED_MAC1);
 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+if (params->phy[EXT_PHY1].type ==
+PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp & 0xfff1);
+else {
+EMAC_WR(bp, EMAC_REG_EMAC_LED,
+(tmp | EMAC_LED_OVERRIDE));
+}
 break;
 case LED_MODE_OPER:
@@ -5949,17 +6053,33 @@
 else
 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
 hw_led_mode);
+} else if ((params->phy[EXT_PHY1].type ==
+PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
+(mode != LED_MODE_OPER)) {
+REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+EMAC_WR(bp, EMAC_REG_EMAC_LED, tmp | 0x3);
 } else
-REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
+REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+hw_led_mode);
 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
 /* Set blinking rate to ~15.9Hz */
-REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
-LED_BLINK_RATE_VAL);
+if (CHIP_IS_E3(bp))
+REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+LED_BLINK_RATE_VAL_E3);
+else
+REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+LED_BLINK_RATE_VAL_E1X_E2);
 REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
 port*4, 1);
-tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
+if ((params->phy[EXT_PHY1].type !=
+PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) &&
+(mode != LED_MODE_OPER)) {
+tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+EMAC_WR(bp, EMAC_REG_EMAC_LED,
+(tmp & (~EMAC_LED_OVERRIDE)));
+}
 if (CHIP_IS_E1(bp) &&
 ((speed == SPEED_2500) ||
@@ -6218,8 +6338,10 @@ static int bnx2x_update_link_down(struct link_params *params,
 MISC_REGISTERS_RESET_REG_2_CLEAR,
 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 }
-if (CHIP_IS_E3(bp))
+if (CHIP_IS_E3(bp)) {
 bnx2x_xmac_disable(params);
+bnx2x_umac_disable(params);
+}
 return 0;
 }
@@ -10205,22 +10327,6 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
 return 0;
 }
-static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy,
-struct link_params *params, u8 mode)
-{
-struct bnx2x *bp = params->bp;
-DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode);
-switch (mode) {
-case LED_MODE_FRONT_PANEL_OFF:
-case LED_MODE_OFF:
-case LED_MODE_OPER:
-case LED_MODE_ON:
-default:
-break;
-}
-return;
-}
 static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
 struct link_params *params)
 {
@@ -10997,7 +11103,7 @@ static struct bnx2x_phy phy_54618se = {
 .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
 .format_fw_ver = (format_fw_ver_t)NULL,
 .hw_reset = (hw_reset_t)NULL,
-.set_link_led = (set_link_led_t)bnx2x_54618se_set_link_led,
+.set_link_led = (set_link_led_t)NULL,
 .phy_specific_func = (phy_specific_func_t)NULL
 };
 /*****************************************************************/
@@ -11718,8 +11824,10 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 /* Stop BigMac rx */
 if (!CHIP_IS_E3(bp))
 bnx2x_bmac_rx_disable(bp, port);
-else
+else {
 bnx2x_xmac_disable(params);
+bnx2x_umac_disable(params);
+}
 /* disable emac */
 if (!CHIP_IS_E3(bp))
 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -11757,14 +11865,21 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
 if (params->phy[INT_PHY].link_reset)
 params->phy[INT_PHY].link_reset(
 &params->phy[INT_PHY], params);
-/* reset BigMac */
-REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 /* disable nig ingress interface */
 if (!CHIP_IS_E3(bp)) {
+/* reset BigMac */
+REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+} else {
+u32 xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+bnx2x_set_xumac_nig(params, 0, 0);
+if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+MISC_REGISTERS_RESET_REG_2_XMAC)
+REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+XMAC_CTRL_REG_SOFT_RESET);
 }
 vars->link_up = 0;
 vars->phy_flags = 0;
@@ -12332,11 +12447,6 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 {
 struct bnx2x *bp = params->bp;
 u16 phy_idx;
-if (!params) {
-DP(NETIF_MSG_LINK, "Uninitialized params !\n");
-return;
-}
 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
@@ -12345,8 +12455,13 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
 }
 }
-if (CHIP_IS_E3(bp))
+if (CHIP_IS_E3(bp)) {
+struct bnx2x_phy *phy = &params->phy[INT_PHY];
+bnx2x_set_aer_mmd(params, phy);
 bnx2x_check_over_curr(params, vars);
+bnx2x_warpcore_config_runtime(phy, params, vars);
+}
 }
 u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
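
bnx2x_warpcore_config_runtime() above is driven from the periodic task: it acts only on every other tick (the turn_to_run_wc_rt toggle), and while a KR link stays down it resets the lane and restarts autoneg, burning one attempt from the rx_tx_asic_rst budget seeded with MAX_KR_LINK_RETRY, until either the link comes up or the retries are spent. A stripped-down sketch of that retry logic in plain C (hypothetical names, not the driver code itself):

#include <stdbool.h>

struct kr_retry {
        unsigned int retries_left;      /* seeded with the retry budget */
        bool run_this_tick;             /* toggled so we act every 2nd tick */
};

static void kr_periodic_tick(struct kr_retry *st, bool link_up,
                             void (*reset_lane_and_restart_an)(void))
{
        st->run_this_tick = !st->run_this_tick;
        if (!st->run_this_tick || !st->retries_left)
                return;

        if (link_up) {
                st->retries_left = 0;   /* link is up: stop poking the lane */
                return;
        }

        reset_lane_and_restart_an();    /* kick the serdes and try again */
        st->retries_left--;
}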


@@ -303,6 +303,9 @@ struct link_vars {
 #define PERIODIC_FLAGS_LINK_EVENT 0x0001
 u32 aeu_int_mask;
+u8 rx_tx_asic_rst;
+u8 turn_to_run_wc_rt;
+u16 rsrv2;
 };
 /***********************************************************/


@@ -428,28 +428,33 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
 return &wrb->payload.sgl[0];
 }
-/* Don't touch the hdr after it's prepared */
-static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
-bool embedded, u8 sge_cnt, u32 opcode)
-{
-if (embedded)
-wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
-else
-wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
-MCC_WRB_SGE_CNT_SHIFT;
-wrb->payload_length = payload_len;
-wrb->tag0 = opcode;
-be_dws_cpu_to_le(wrb, 8);
-}
 /* Don't touch the hdr after it's prepared */
-static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
-u8 subsystem, u8 opcode, int cmd_len)
+/* mem will be NULL for embedded commands */
+static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
+u8 subsystem, u8 opcode, int cmd_len,
+struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
 {
+struct be_sge *sge;
 req_hdr->opcode = opcode;
 req_hdr->subsystem = subsystem;
 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
 req_hdr->version = 0;
+wrb->tag0 = opcode;
+wrb->tag1 = subsystem;
+wrb->payload_length = cmd_len;
+if (mem) {
+wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
+MCC_WRB_SGE_CNT_SHIFT;
+sge = nonembedded_sgl(wrb);
+sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
+sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
+sge->len = cpu_to_le32(mem->size);
+} else
+wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
+be_dws_cpu_to_le(wrb, 8);
 }
 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@ -586,10 +591,8 @@ int be_cmd_eq_create(struct be_adapter *adapter,
 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_EQ_CREATE, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
@@ -632,12 +635,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_NTWK_MAC_QUERY);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
 req->type = type;
 if (permanent) {
 req->permanent = 1;
@@ -674,11 +673,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_NTWK_PMAC_ADD);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
 req->hdr.domain = domain;
 req->if_id = cpu_to_le32(if_id);
@@ -692,6 +688,10 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 err:
 spin_unlock_bh(&adapter->mcc_lock);
+if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+status = -EPERM;
 return status;
 }
@@ -711,11 +711,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_NTWK_PMAC_DEL);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
 req->hdr.domain = dom;
 req->if_id = cpu_to_le32(if_id);
@@ -746,11 +743,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
 req = embedded_payload(wrb);
 ctxt = &req->context;
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_CQ_CREATE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_CQ_CREATE, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 if (lancer_chip(adapter)) {
@@ -822,11 +816,8 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
 req = embedded_payload(wrb);
 ctxt = &req->context;
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_MCC_CREATE_EXT);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
 if (lancer_chip(adapter)) {
@@ -882,11 +873,8 @@ int be_cmd_mccq_org_create(struct be_adapter *adapter,
 req = embedded_payload(wrb);
 ctxt = &req->context;
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_MCC_CREATE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
@@ -943,11 +931,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
 req = embedded_payload(wrb);
 ctxt = &req->context;
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_ETH_TX_CREATE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
-sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
 if (lancer_chip(adapter)) {
 req->hdr.version = 1;
@@ -999,11 +984,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_ETH_RX_CREATE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
-sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
 req->cq_id = cpu_to_le16(cq_id);
 req->frag_size = fls(frag_size) - 1;
@@ -1071,9 +1053,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 BUG();
 }
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
-be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
+NULL);
 req->id = cpu_to_le16(q->id);
 status = be_mbox_notify_wait(adapter);
@@ -1100,9 +1081,8 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
-sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
 req->id = cpu_to_le16(q->id);
 status = be_mcc_notify_wait(adapter);
@@ -1133,12 +1113,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_NTWK_INTERFACE_CREATE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
 req->hdr.domain = domain;
 req->capability_flags = cpu_to_le32(cap_flags);
 req->enable_flags = cpu_to_le32(en_flags);
@@ -1182,12 +1158,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
 req->hdr.domain = domain;
 req->interface_id = cpu_to_le32(interface_id);
@@ -1205,7 +1177,6 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 {
 struct be_mcc_wrb *wrb;
 struct be_cmd_req_hdr *hdr;
-struct be_sge *sge;
 int status = 0;
 if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
@@ -1219,22 +1190,13 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
 goto err;
 }
 hdr = nonemb_cmd->va;
-sge = nonembedded_sgl(wrb);
-be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
-OPCODE_ETH_GET_STATISTICS);
-be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
-OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);
+be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
+OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
 if (adapter->generation == BE_GEN3)
 hdr->version = 1;
-wrb->tag1 = CMD_SUBSYSTEM_ETH;
-sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
-sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
-sge->len = cpu_to_le32(nonemb_cmd->size);
 be_mcc_notify(adapter);
 adapter->stats_cmd_sent = true;
@@ -1250,7 +1212,6 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
 struct be_mcc_wrb *wrb;
 struct lancer_cmd_req_pport_stats *req;
-struct be_sge *sge;
 int status = 0;
 spin_lock_bh(&adapter->mcc_lock);
@@ -1261,23 +1222,14 @@
 goto err;
 }
 req = nonemb_cmd->va;
-sge = nonembedded_sgl(wrb);
-be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
-OPCODE_ETH_GET_PPORT_STATS);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
+nonemb_cmd);
 req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
 req->cmd_params.params.reset_stats = 0;
-wrb->tag1 = CMD_SUBSYSTEM_ETH;
-sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
-sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
-sge->len = cpu_to_le32(nonemb_cmd->size);
 be_mcc_notify(adapter);
 adapter->stats_cmd_sent = true;
@@ -1303,11 +1255,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
 status = be_mcc_notify_wait(adapter);
 if (!status) {
@@ -1343,11 +1292,9 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
+wrb, NULL);
 wrb->tag1 = mccq_index;
@@ -1374,11 +1321,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_MANAGE_FAT);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
 req->fat_operation = cpu_to_le32(QUERY_FAT);
 status = be_mcc_notify_wait(adapter);
 if (!status) {
@@ -1397,7 +1341,6 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
 struct be_dma_mem get_fat_cmd;
 struct be_mcc_wrb *wrb;
 struct be_cmd_req_get_fat *req;
-struct be_sge *sge;
 u32 offset = 0, total_size, buf_size,
 log_offset = sizeof(u32), payload_len;
 int status;
@@ -1430,18 +1373,11 @@
 goto err;
 }
 req = get_fat_cmd.va;
-sge = nonembedded_sgl(wrb);
 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
-be_wrb_hdr_prepare(wrb, payload_len, false, 1,
-OPCODE_COMMON_MANAGE_FAT);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_MANAGE_FAT, payload_len);
-sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
-sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
-sge->len = cpu_to_le32(get_fat_cmd.size);
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
+&get_fat_cmd);
 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
 req->read_log_offset = cpu_to_le32(log_offset);
@@ -1485,11 +1421,9 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_GET_FW_VERSION);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
 status = be_mcc_notify_wait(adapter);
 if (!status) {
 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1520,11 +1454,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_MODIFY_EQ_DELAY);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
 req->num_eq = cpu_to_le32(1);
 req->delay[0].eq_id = cpu_to_le32(eq_id);
@@ -1555,11 +1486,8 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_NTWK_VLAN_CONFIG);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
 req->interface_id = if_id;
 req->promiscuous = promiscuous;
@@ -1582,7 +1510,6 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 struct be_mcc_wrb *wrb;
 struct be_dma_mem *mem = &adapter->rx_filter;
 struct be_cmd_req_rx_filter *req = mem->va;
-struct be_sge *sge;
 int status;
 spin_lock_bh(&adapter->mcc_lock);
@@ -1592,16 +1519,10 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
 status = -EBUSY;
 goto err;
 }
-sge = nonembedded_sgl(wrb);
-sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
-sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
-sge->len = cpu_to_le32(mem->size);
-be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
-OPCODE_COMMON_NTWK_RX_FILTER);
 memset(req, 0, sizeof(*req));
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+wrb, mem);
 req->if_id = cpu_to_le32(adapter->if_handle);
 if (flags & IFF_PROMISC) {
@@ -1646,11 +1567,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_SET_FLOW_CONTROL);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1678,11 +1596,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_GET_FLOW_CONTROL);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
 status = be_mcc_notify_wait(adapter);
 if (!status) {
@@ -1711,11 +1626,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
 status = be_mbox_notify_wait(adapter);
 if (!status) {
@@ -1742,11 +1654,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_FUNCTION_RESET);
-be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
 status = be_mbox_notify_wait(adapter);
@@ -1768,11 +1677,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
 wrb = wrb_from_mbox(adapter);
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_ETH_RSS_CONFIG);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-OPCODE_ETH_RSS_CONFIG, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
 req->if_id = cpu_to_le32(adapter->if_handle);
 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
@@ -1804,11 +1710,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_ENABLE_DISABLE_BEACON);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
 req->port_num = port_num;
 req->beacon_state = state;
@@ -1838,11 +1741,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_COMMON_GET_BEACON_STATE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
 req->port_num = port_num;
@@ -1879,13 +1779,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
-true, 1, OPCODE_COMMON_WRITE_OBJECT);
-wrb->tag1 = CMD_SUBSYSTEM_COMMON;
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 OPCODE_COMMON_WRITE_OBJECT,
-sizeof(struct lancer_cmd_req_write_object));
+sizeof(struct lancer_cmd_req_write_object), wrb,
+NULL);
 ctxt = &req->context;
 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
@@ -1938,7 +1835,6 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 {
 struct be_mcc_wrb *wrb;
 struct be_cmd_write_flashrom *req;
-struct be_sge *sge;
 int status;
 spin_lock_bh(&adapter->mcc_lock);
@@ -1950,17 +1846,9 @@
 goto err_unlock;
 }
 req = cmd->va;
-sge = nonembedded_sgl(wrb);
-be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
-OPCODE_COMMON_WRITE_FLASHROM);
-wrb->tag1 = CMD_SUBSYSTEM_COMMON;
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
-sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
-sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
-sge->len = cpu_to_le32(cmd->size);
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
 req->params.op_type = cpu_to_le32(flash_type);
 req->params.op_code = cpu_to_le32(flash_opcode);
@@ -1998,11 +1886,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
 }
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
-OPCODE_COMMON_READ_FLASHROM);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
@@ -2023,7 +1908,6 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
 {
 struct be_mcc_wrb *wrb;
 struct be_cmd_req_acpi_wol_magic_config *req;
-struct be_sge *sge;
 int status;
 spin_lock_bh(&adapter->mcc_lock);
@@ -2034,19 +1918,12 @@
 goto err;
 }
 req = nonemb_cmd->va;
-sge = nonembedded_sgl(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
-OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
-OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
+OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
+nonemb_cmd);
 memcpy(req->magic_mac, mac, ETH_ALEN);
-sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
-sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
-sge->len = cpu_to_le32(nonemb_cmd->size);
 status = be_mcc_notify_wait(adapter);
 err:
@@ -2071,12 +1948,9 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
-sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
+NULL);
 req->src_port = port_num;
 req->dest_port = port_num;
@@ -2106,11 +1980,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 req = embedded_payload(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
-OPCODE_LOWLEVEL_LOOPBACK_TEST);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
 req->hdr.timeout = cpu_to_le32(4);
 req->pattern = cpu_to_le64(pattern);
@@ -2136,7 +2007,6 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
 {
 struct be_mcc_wrb *wrb;
 struct be_cmd_req_ddrdma_test *req;
-struct be_sge *sge;
 int status;
 int i, j = 0;
@@ -2148,15 +2018,8 @@
 goto err;
 }
 req = cmd->va;
-sge = nonembedded_sgl(wrb);
-be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
-OPCODE_LOWLEVEL_HOST_DDR_DMA);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
-OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
-sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
-sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
-sge->len = cpu_to_le32(cmd->size);
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
 req->pattern = cpu_to_le64(pattern);
 req->byte_count = cpu_to_le32(byte_cnt);
@@ -2201,15 +2064,9 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
 req = nonemb_cmd->va;
 sge = nonembedded_sgl(wrb);
-be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
-OPCODE_COMMON_SEEPROM_READ);
-be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+nonemb_cmd);
-sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
status = be_mcc_notify_wait(adapter); status = be_mcc_notify_wait(adapter);
@ -2223,7 +2080,6 @@ int be_cmd_get_phy_info(struct be_adapter *adapter,
{ {
struct be_mcc_wrb *wrb; struct be_mcc_wrb *wrb;
struct be_cmd_req_get_phy_info *req; struct be_cmd_req_get_phy_info *req;
struct be_sge *sge;
struct be_dma_mem cmd; struct be_dma_mem cmd;
int status; int status;
@ -2244,18 +2100,10 @@ int be_cmd_get_phy_info(struct be_adapter *adapter,
} }
req = cmd.va; req = cmd.va;
sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_PHY_DETAILS); OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
wrb, &cmd);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_PHY_DETAILS,
sizeof(*req));
sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(cmd.size);
status = be_mcc_notify_wait(adapter); status = be_mcc_notify_wait(adapter);
if (!status) { if (!status) {
@ -2288,11 +2136,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
req = embedded_payload(wrb); req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_QOS); OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_QOS, sizeof(*req));
req->hdr.domain = domain; req->hdr.domain = domain;
req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@ -2310,7 +2155,6 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
struct be_mcc_wrb *wrb; struct be_mcc_wrb *wrb;
struct be_cmd_req_cntl_attribs *req; struct be_cmd_req_cntl_attribs *req;
struct be_cmd_resp_cntl_attribs *resp; struct be_cmd_resp_cntl_attribs *resp;
struct be_sge *sge;
int status; int status;
int payload_len = max(sizeof(*req), sizeof(*resp)); int payload_len = max(sizeof(*req), sizeof(*resp));
struct mgmt_controller_attrib *attribs; struct mgmt_controller_attrib *attribs;
@ -2335,15 +2179,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
goto err; goto err;
} }
req = attribs_cmd.va; req = attribs_cmd.va;
sge = nonembedded_sgl(wrb);
be_wrb_hdr_prepare(wrb, payload_len, false, 1, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_GET_CNTL_ATTRIBUTES); OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, &attribs_cmd);
OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(attribs_cmd.size);
status = be_mbox_notify_wait(adapter); status = be_mbox_notify_wait(adapter);
if (!status) { if (!status) {
@ -2376,11 +2215,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
req = embedded_payload(wrb); req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP); OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
CAPABILITY_BE3_NATIVE_ERX_API); CAPABILITY_BE3_NATIVE_ERX_API);


@@ -229,27 +229,29 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
 	int status = 0;
+	u8 current_mac[ETH_ALEN];
+	u32 pmac_id = adapter->pmac_id;
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
-	/* MAC addr configuration will be done in hardware for VFs
-	 * by their corresponding PFs. Just copy to netdev addr here
-	 */
-	if (!be_physfn(adapter))
-		goto netdev_addr;
-
-	status = be_cmd_pmac_del(adapter, adapter->if_handle,
-			adapter->pmac_id, 0);
+	status = be_cmd_mac_addr_query(adapter, current_mac,
+			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
 	if (status)
-		return status;
+		goto err;
-	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-			adapter->if_handle, &adapter->pmac_id, 0);
-netdev_addr:
-	if (!status)
-		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
+		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+				adapter->if_handle, &adapter->pmac_id, 0);
+		if (status)
+			goto err;
+
+		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
+	}
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	return 0;
+err:
+	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
 	return status;
 }


@@ -85,7 +85,7 @@ config APRICOT
 config BVME6000_NET
 	tristate "BVME6000 Ethernet support"
-	depends on MVME16x
+	depends on BVME6000
 	---help---
 	  This is the driver for the Ethernet interface on BVME4000 and
 	  BVME6000 VME boards.  Say Y here to include the driver for this chip


@@ -36,8 +36,8 @@
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 24
-#define QLCNIC_LINUX_VERSIONID  "5.0.24"
+#define _QLCNIC_LINUX_SUBVERSION 25
+#define QLCNIC_LINUX_VERSIONID  "5.0.25"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 	 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))


@@ -935,31 +935,49 @@ static int qlcnic_set_led(struct net_device *dev,
 {
 	struct qlcnic_adapter *adapter = netdev_priv(dev);
 	int max_sds_rings = adapter->max_sds_rings;
+	int err = -EIO, active = 1;
+
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		netdev_warn(dev, "LED test not supported for non "
+				"privilege function\n");
+		return -EOPNOTSUPP;
+	}
 	switch (state) {
 	case ETHTOOL_ID_ACTIVE:
 		if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
 			return -EBUSY;
-		if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-			if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
-				return -EIO;
+		if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+			break;
-			if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) {
-				clear_bit(__QLCNIC_RESETTING, &adapter->state);
-				return -EIO;
-			}
+		if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+			if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+				break;
 			set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
 		}
-		if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0)
-			return 0;
+		if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) {
+			err = 0;
+			break;
+		}
 		dev_err(&adapter->pdev->dev,
 			"Failed to set LED blink state.\n");
 		break;
 	case ETHTOOL_ID_INACTIVE:
+		active = 0;
+
+		if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+			break;
+
+		if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+			if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+				break;
+			set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+		}
+
 		if (adapter->nic_ops->config_led(adapter, 0, 0xf))
 			dev_err(&adapter->pdev->dev,
 				"Failed to reset LED blink state.\n");
@@ -970,14 +988,13 @@ static int qlcnic_set_led(struct net_device *dev,
 		return -EINVAL;
 	}
-	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) {
+	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
 		qlcnic_diag_free_res(dev, max_sds_rings);
-		clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	}
-	clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+	if (!active || err)
+		clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
-	return -EIO;
+	return err;
 }
 static void


@@ -407,7 +407,9 @@ enum {
 #define QLCNIC_CRB_SRE		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
 #define QLCNIC_CRB_ROMUSB	\
 	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_EPG		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_EG)
 #define QLCNIC_CRB_I2Q		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_TIMER	QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_TIMR)
 #define QLCNIC_CRB_I2C0		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
 #define QLCNIC_CRB_SMB		QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
 #define QLCNIC_CRB_MAX		QLCNIC_PCI_CRB_WINDOW(64)


@@ -566,7 +566,7 @@ int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
 		return -EIO;
 	if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
-		qlcnic_set_fw_loopback(adapter, mode);
+		qlcnic_set_fw_loopback(adapter, 0);
 		return -EIO;
 	}


@@ -422,9 +422,53 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
 	QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
 	QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
-	qlcnic_rom_lock(adapter);
-	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+	/* Halt all the indiviual PEGs and other blocks */
+	/* disable all I2Q */
+	QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0);
+
+	/* disable all niu interrupts */
+	QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff);
+	/* disable xge rx/tx */
+	QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00);
+	/* disable xg1 rx/tx */
+	QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00);
+	/* disable sideband mac */
+	QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00);
+	/* disable ap0 mac */
+	QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00);
+	/* disable ap1 mac */
+	QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
+
+	/* halt sre */
+	val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000);
+	QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
+
+	/* halt epg */
+	QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1);
+
+	/* halt timers */
+	QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0);
+	QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0);
+
+	/* halt pegs */
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1);
+	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
+	msleep(20);
 	qlcnic_rom_unlock(adapter);
+	/* big hammer don't reset CAM block on reset */
+	QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
 	/* Init HW CRB block */
 	if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
@@ -522,8 +566,10 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
 	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
 	QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
 	msleep(1);
+
+	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+	QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
 	return 0;
 }


@@ -2840,8 +2840,15 @@ qlcnic_fwinit_work(struct work_struct *work)
 		goto wait_npar;
 	}
+	if (dev_state == QLCNIC_DEV_INITIALIZING ||
+	    dev_state == QLCNIC_DEV_READY) {
+		dev_info(&adapter->pdev->dev, "Detected state change from "
+				"DEV_NEED_RESET, skipping ack check\n");
+		goto skip_ack_check;
+	}
+
 	if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
-		dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+		dev_info(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
 					adapter->reset_ack_timeo);
 		goto skip_ack_check;
 	}
@@ -3497,11 +3504,16 @@ qlcnic_store_beacon(struct device *dev,
 {
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 	int max_sds_rings = adapter->max_sds_rings;
-	int dev_down = 0;
 	u16 beacon;
 	u8 b_state, b_rate;
 	int err;
+	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
+		dev_warn(dev, "LED test not supported for non "
+				"privilege function\n");
+		return -EOPNOTSUPP;
+	}
+
 	if (len != sizeof(u16))
 		return QL_STATUS_INVALID_PARAM;
@@ -3513,36 +3525,40 @@ qlcnic_store_beacon(struct device *dev,
 	if (adapter->ahw->beacon_state == b_state)
 		return len;
+	rtnl_lock();
+
 	if (!adapter->ahw->beacon_state)
-		if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+		if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+			rtnl_unlock();
 			return -EBUSY;
+		}
+
+	if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+		err = -EIO;
+		goto out;
+	}
 	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-		if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
-			return -EIO;
-
 		err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
-		if (err) {
-			clear_bit(__QLCNIC_RESETTING, &adapter->state);
-			clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
-			return err;
-		}
-		dev_down = 1;
+		if (err)
+			goto out;
+		set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
 	}
 	err = qlcnic_config_led(adapter, b_state, b_rate);
 	if (!err) {
-		adapter->ahw->beacon_state = b_state;
 		err = len;
+		adapter->ahw->beacon_state = b_state;
 	}
-	if (dev_down) {
+	if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
 		qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
-		clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	}
-	if (!b_state)
+out:
+	if (!adapter->ahw->beacon_state)
 		clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+	rtnl_unlock();
 	return err;
 }


@@ -49,7 +49,7 @@ struct stmmac_extra_stats {
 	unsigned long tx_underflow ____cacheline_aligned;
 	unsigned long tx_carrier;
 	unsigned long tx_losscarrier;
-	unsigned long tx_heartbeat;
+	unsigned long vlan_tag;
 	unsigned long tx_deferred;
 	unsigned long tx_vlan;
 	unsigned long tx_jabber;
@@ -58,9 +58,9 @@ struct stmmac_extra_stats {
 	unsigned long tx_ip_header_error;
 	/* Receive errors */
 	unsigned long rx_desc;
-	unsigned long rx_partial;
-	unsigned long rx_runt;
-	unsigned long rx_toolong;
+	unsigned long sa_filter_fail;
+	unsigned long overflow_error;
+	unsigned long ipc_csum_error;
 	unsigned long rx_collision;
 	unsigned long rx_crc;
 	unsigned long rx_length;


@@ -25,33 +25,34 @@
 	union {
 		struct {
 			/* RDES0 */
-			u32 reserved1:1;
+			u32 payload_csum_error:1;
 			u32 crc_error:1;
 			u32 dribbling:1;
 			u32 mii_error:1;
 			u32 receive_watchdog:1;
 			u32 frame_type:1;
 			u32 collision:1;
-			u32 frame_too_long:1;
+			u32 ipc_csum_error:1;
 			u32 last_descriptor:1;
 			u32 first_descriptor:1;
-			u32 multicast_frame:1;
-			u32 run_frame:1;
+			u32 vlan_tag:1;
+			u32 overflow_error:1;
 			u32 length_error:1;
-			u32 partial_frame_error:1;
+			u32 sa_filter_fail:1;
 			u32 descriptor_error:1;
 			u32 error_summary:1;
 			u32 frame_length:14;
-			u32 filtering_fail:1;
+			u32 da_filter_fail:1;
 			u32 own:1;
 			/* RDES1 */
 			u32 buffer1_size:11;
 			u32 buffer2_size:11;
-			u32 reserved2:2;
+			u32 reserved1:2;
 			u32 second_address_chained:1;
 			u32 end_ring:1;
-			u32 reserved3:5;
+			u32 reserved2:5;
 			u32 disable_ic:1;
 		} rx;
 		struct {
 			/* RDES0 */
@@ -91,24 +92,28 @@
 			u32 underflow_error:1;
 			u32 excessive_deferral:1;
 			u32 collision_count:4;
-			u32 heartbeat_fail:1;
+			u32 vlan_frame:1;
 			u32 excessive_collisions:1;
 			u32 late_collision:1;
 			u32 no_carrier:1;
 			u32 loss_carrier:1;
-			u32 reserved1:3;
+			u32 payload_error:1;
+			u32 frame_flushed:1;
+			u32 jabber_timeout:1;
 			u32 error_summary:1;
-			u32 reserved2:15;
+			u32 ip_header_error:1;
+			u32 time_stamp_status:1;
+			u32 reserved1:13;
 			u32 own:1;
 			/* TDES1 */
 			u32 buffer1_size:11;
 			u32 buffer2_size:11;
-			u32 reserved3:1;
+			u32 time_stamp_enable:1;
 			u32 disable_padding:1;
 			u32 second_address_chained:1;
 			u32 end_ring:1;
 			u32 crc_disable:1;
-			u32 reserved4:2;
+			u32 checksum_insertion:2;
 			u32 first_segment:1;
 			u32 last_segment:1;
 			u32 interrupt:1;


@@ -50,11 +50,12 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
 		stats->collisions += p->des01.tx.collision_count;
 		ret = -1;
 	}
-	if (unlikely(p->des01.tx.heartbeat_fail)) {
-		x->tx_heartbeat++;
-		stats->tx_heartbeat_errors++;
-		ret = -1;
+
+	if (p->des01.etx.vlan_frame) {
+		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+		x->tx_vlan++;
 	}
 	if (unlikely(p->des01.tx.deferred))
 		x->tx_deferred++;
@@ -68,12 +69,12 @@ static int ndesc_get_tx_len(struct dma_desc *p)
 /* This function verifies if each incoming frame has some errors
  * and, if required, updates the multicast statistics.
- * In case of success, it returns csum_none because the device
- * is not able to compute the csum in HW. */
+ * In case of success, it returns good_frame because the GMAC device
+ * is supposed to be able to compute the csum in HW. */
 static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 			       struct dma_desc *p)
 {
-	int ret = csum_none;
+	int ret = good_frame;
 	struct net_device_stats *stats = (struct net_device_stats *)data;
 	if (unlikely(p->des01.rx.last_descriptor == 0)) {
@@ -86,12 +87,12 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 	if (unlikely(p->des01.rx.error_summary)) {
 		if (unlikely(p->des01.rx.descriptor_error))
 			x->rx_desc++;
-		if (unlikely(p->des01.rx.partial_frame_error))
-			x->rx_partial++;
-		if (unlikely(p->des01.rx.run_frame))
-			x->rx_runt++;
-		if (unlikely(p->des01.rx.frame_too_long))
-			x->rx_toolong++;
+		if (unlikely(p->des01.rx.sa_filter_fail))
+			x->sa_filter_fail++;
+		if (unlikely(p->des01.rx.overflow_error))
+			x->overflow_error++;
+		if (unlikely(p->des01.rx.ipc_csum_error))
+			x->ipc_csum_error++;
 		if (unlikely(p->des01.rx.collision)) {
 			x->rx_collision++;
 			stats->collisions++;
@@ -113,10 +114,10 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 		x->rx_mii++;
 		ret = discard_frame;
 	}
-	if (p->des01.rx.multicast_frame) {
-		x->rx_multicast++;
-		stats->multicast++;
-	}
+#ifdef STMMAC_VLAN_TAG_USED
+	if (p->des01.rx.vlan_tag)
+		x->vlan_tag++;
+#endif
 	return ret;
 }
@@ -184,6 +185,9 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 {
 	p->des01.tx.first_segment = is_fs;
 	norm_set_tx_desc_len(p, len);
+
+	if (likely(csum_flag))
+		p->des01.tx.checksum_insertion = cic_full;
 }
 static void ndesc_clear_tx_ic(struct dma_desc *p)


@@ -50,7 +50,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
 	STMMAC_STAT(tx_underflow),
 	STMMAC_STAT(tx_carrier),
 	STMMAC_STAT(tx_losscarrier),
-	STMMAC_STAT(tx_heartbeat),
+	STMMAC_STAT(vlan_tag),
 	STMMAC_STAT(tx_deferred),
 	STMMAC_STAT(tx_vlan),
 	STMMAC_STAT(rx_vlan),
@@ -59,9 +59,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
 	STMMAC_STAT(tx_payload_error),
 	STMMAC_STAT(tx_ip_header_error),
 	STMMAC_STAT(rx_desc),
-	STMMAC_STAT(rx_partial),
-	STMMAC_STAT(rx_runt),
-	STMMAC_STAT(rx_toolong),
+	STMMAC_STAT(sa_filter_fail),
+	STMMAC_STAT(overflow_error),
+	STMMAC_STAT(ipc_csum_error),
 	STMMAC_STAT(rx_collision),
 	STMMAC_STAT(rx_crc),
 	STMMAC_STAT(rx_length),


@@ -325,7 +325,7 @@ static int stmmac_init_phy(struct net_device *dev)
 	     (interface == PHY_INTERFACE_MODE_RMII))) {
 		phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
 				      SUPPORTED_Asym_Pause);
-		priv->phydev->advertising = priv->phydev->supported;
+		phydev->advertising = phydev->supported;
 	}
 	/*
@@ -812,9 +812,11 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
  */
 static int stmmac_get_hw_features(struct stmmac_priv *priv)
 {
-	u32 hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
+	u32 hw_cap = 0;
+
+	if (priv->hw->dma->get_hw_feature) {
+		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
-	if (likely(hw_cap)) {
 		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
 		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
 		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
@@ -937,6 +939,7 @@ static int stmmac_open(struct net_device *dev)
 	stmmac_get_hw_features(priv);
+	priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
 	if (priv->rx_coe)
 		pr_info("stmmac: Rx Checksum Offload Engine supported\n");
 	if (priv->plat->tx_coe)
@@ -1274,8 +1277,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 #endif
 			skb->protocol = eth_type_trans(skb, priv->dev);
-			if (unlikely(status == csum_none)) {
-				/* always for the old mac 10/100 */
+			if (unlikely(!priv->rx_coe)) {
+				/* No RX COE for old mac10/100 devices */
 				skb_checksum_none_assert(skb);
 				netif_receive_skb(skb);
 			} else {


@@ -106,7 +106,7 @@ extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
-extern bool vlan_do_receive(struct sk_buff **skb);
+extern bool vlan_do_receive(struct sk_buff **skb, bool last_handler);
 extern struct sk_buff *vlan_untag(struct sk_buff *skb);
 #else
@@ -128,9 +128,9 @@ static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
 	return 0;
 }
-static inline bool vlan_do_receive(struct sk_buff **skb)
+static inline bool vlan_do_receive(struct sk_buff **skb, bool last_handler)
 {
-	if ((*skb)->vlan_tci & VLAN_VID_MASK)
+	if (((*skb)->vlan_tci & VLAN_VID_MASK) && last_handler)
 		(*skb)->pkt_type = PACKET_OTHERHOST;
 	return false;
 }


@@ -134,6 +134,7 @@ struct inet_timewait_sock {
 	struct inet_bind_bucket	*tw_tb;
 	struct hlist_node	tw_death_node;
 };
+#define tw_tclass tw_tos
 static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
 				      struct hlist_nulls_head *list)


@@ -486,7 +486,8 @@ extern int ip6_rcv_finish(struct sk_buff *skb);
 extern int			ip6_xmit(struct sock *sk,
 					 struct sk_buff *skb,
 					 struct flowi6 *fl6,
-					 struct ipv6_txoptions *opt);
+					 struct ipv6_txoptions *opt,
+					 int tclass);
 extern int			ip6_nd_hdr(struct sock *sk,
 					   struct sk_buff *skb,


@@ -4,7 +4,7 @@
 #include <linux/netpoll.h>
 #include "vlan.h"
-bool vlan_do_receive(struct sk_buff **skbp)
+bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
 {
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -13,7 +13,10 @@ bool vlan_do_receive(struct sk_buff **skbp)
 	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
 	if (!vlan_dev) {
-		if (vlan_id)
+		/* Only the last call to vlan_do_receive() should change
+		 * pkt_type to PACKET_OTHERHOST
+		 */
+		if (vlan_id && last_handler)
 			skb->pkt_type = PACKET_OTHERHOST;
 		return false;
 	}


@@ -137,10 +137,22 @@ static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
 		kfree_rcu(tt_local_entry, rcu);
 }
+static void tt_global_entry_free_rcu(struct rcu_head *rcu)
+{
+	struct tt_global_entry *tt_global_entry;
+
+	tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
+
+	if (tt_global_entry->orig_node)
+		orig_node_free_ref(tt_global_entry->orig_node);
+
+	kfree(tt_global_entry);
+}
+
 static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
 {
 	if (atomic_dec_and_test(&tt_global_entry->refcount))
-		kfree_rcu(tt_global_entry, rcu);
+		call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
 }
 static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -710,6 +722,9 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
+	if (!hash)
+		return;
+
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
 		list_lock = &hash->list_locks[i];


@@ -224,22 +224,22 @@ struct socket_packet {
 struct tt_local_entry {
 	uint8_t addr[ETH_ALEN];
+	struct hlist_node hash_entry;
 	unsigned long last_seen;
 	uint16_t flags;
 	atomic_t refcount;
 	struct rcu_head rcu;
-	struct hlist_node hash_entry;
 };
 struct tt_global_entry {
 	uint8_t addr[ETH_ALEN];
+	struct hlist_node hash_entry; /* entry in the global table */
 	struct orig_node *orig_node;
 	uint8_t ttvn;
 	uint16_t flags; /* only TT_GLOBAL_ROAM is used */
 	unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
 	atomic_t refcount;
 	struct rcu_head rcu;
-	struct hlist_node hash_entry; /* entry in the global table */
 };
 struct tt_change_node {


@@ -3283,18 +3283,18 @@ static int __netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
+	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (vlan_tx_tag_present(skb)) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = NULL;
 		}
-		if (vlan_do_receive(&skb))
+		if (vlan_do_receive(&skb, !rx_handler))
 			goto another_round;
 		else if (unlikely(!skb))
 			goto out;
 	}
-	rx_handler = rcu_dereference(skb->dev->rx_handler);
 	if (rx_handler) {
 		if (pt_prev) {
 			ret = deliver_skb(skb, pt_prev, orig_dev);


@@ -271,7 +271,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
 					 &ireq6->loc_addr,
 					 &ireq6->rmt_addr);
 		ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
-		err = ip6_xmit(sk, skb, &fl6, opt);
+		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -326,7 +326,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(skb, dst);
-		ip6_xmit(ctl_sk, skb, &fl6, NULL);
+		ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
 		DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
 		return;


@@ -345,6 +345,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 			tw6 = inet6_twsk((struct sock *)tw);
 			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
 			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+			tw->tw_tclass = np->tclass;
 			tw->tw_ipv6only = np->ipv6only;
 		}
 #endif


@@ -1713,6 +1713,40 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 	ip6_route_add(&cfg);
 }
+static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+						  int plen,
+						  const struct net_device *dev,
+						  u32 flags, u32 noflags)
+{
+	struct fib6_node *fn;
+	struct rt6_info *rt = NULL;
+	struct fib6_table *table;
+
+	table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+	if (table == NULL)
+		return NULL;
+
+	write_lock_bh(&table->tb6_lock);
+	fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
+	if (!fn)
+		goto out;
+	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+		if (rt->rt6i_dev->ifindex != dev->ifindex)
+			continue;
+		if ((rt->rt6i_flags & flags) != flags)
+			continue;
+		if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
+			continue;
+		dst_hold(&rt->dst);
+		break;
+	}
+out:
+	write_unlock_bh(&table->tb6_lock);
+	return rt;
+}
+
 /* Create "default" multicast route to the interface */
 static void addrconf_add_mroute(struct net_device *dev)
@@ -1842,10 +1876,13 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
 		if (addrconf_finite_timeout(rt_expires))
 			rt_expires *= HZ;
-		rt = rt6_lookup(net, &pinfo->prefix, NULL,
-				dev->ifindex, 1);
+		rt = addrconf_get_prefix_route(&pinfo->prefix,
+					       pinfo->prefix_len,
+					       dev,
+					       RTF_ADDRCONF | RTF_PREFIX_RT,
+					       RTF_GATEWAY | RTF_DEFAULT);
-		if (rt && addrconf_is_prefix_route(rt)) {
+		if (rt) {
 			/* Autoconf prefix route */
 			if (valid_lft == 0) {
 				ip6_del_rt(rt);
View file

@@ -248,7 +248,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
 	/* Restore final destination back after routing done */
 	ipv6_addr_copy(&fl6.daddr, &np->daddr);
-	res = ip6_xmit(sk, skb, &fl6, np->opt);
+	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 	rcu_read_unlock();
 	return res;
 }


@@ -180,7 +180,7 @@ int ip6_output(struct sk_buff *skb)
  */
 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-	     struct ipv6_txoptions *opt)
+	     struct ipv6_txoptions *opt, int tclass)
 {
 	struct net *net = sock_net(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
@@ -190,7 +190,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	u8  proto = fl6->flowi6_proto;
 	int seg_len = skb->len;
 	int hlimit = -1;
-	int tclass = 0;
 	u32 mtu;
 	if (opt) {
@@ -228,10 +227,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	/*
 	 *	Fill in the IPv6 header
 	 */
-	if (np) {
-		tclass = np->tclass;
+	if (np)
 		hlimit = np->hop_limit;
-	}
 	if (hlimit < 0)
 		hlimit = ip6_dst_hoplimit(dst);
@@ -1126,7 +1123,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 			hh_len + fragheaderlen + transhdrlen + 20,
 			(flags & MSG_DONTWAIT), &err);
 		if (skb == NULL)
-			return -ENOMEM;
+			return err;
 		/* reserve space for Hardware header */
 		skb_reserve(skb, hh_len);


@@ -1086,11 +1086,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	rt->dst.output  = ip6_output;
 	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
-	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	rt->rt6i_dst.plen = 128;
 	rt->rt6i_idev     = idev;
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;


@@ -513,7 +513,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
 		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
 		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
-		err = ip6_xmit(sk, skb, &fl6, opt);
+		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
 		err = net_xmit_eval(err);
 	}
@@ -979,7 +979,7 @@ static int tcp6_gro_complete(struct sk_buff *skb)
 }
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
-				 u32 ts, struct tcp_md5sig_key *key, int rst)
+				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
 {
 	const struct tcphdr *th = tcp_hdr(skb);
 	struct tcphdr *t1;
@@ -1060,7 +1060,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
 	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
-		ip6_xmit(ctl_sk, buff, &fl6, NULL);
+		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
 		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 		if (rst)
 			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -1093,13 +1093,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
 			  (th->doff << 2);
-	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
+	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
 }
 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
-			    struct tcp_md5sig_key *key)
+			    struct tcp_md5sig_key *key, u8 tclass)
 {
-	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
+	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
 }
 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1109,7 +1109,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
+			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
+			tw->tw_tclass);
 	inet_twsk_put(tw);
 }
@@ -1118,7 +1119,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 				  struct request_sock *req)
 {
 	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
-			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
+			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
 }


@@ -243,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
 	if (!(transport->param_flags & SPP_PMTUD_ENABLE))
 		skb->local_df = 1;
-	return ip6_xmit(sk, skb, &fl6, np->opt);
+	return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
 }
 /* Returns the dst cache entry for the given source and destination ip