Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1436 commits)
  cassini: Use local-mac-address prom property for Cassini MAC address
  net: remove the duplicate #ifdef __KERNEL__
  net: bridge: check the length of skb after nf_bridge_maybe_copy_header()
  netconsole: clarify stopping message
  netconsole: don't announce stopping if nothing happened
  cnic: Fix the type field in SPQ messages
  netfilter: fix export secctx error handling
  netfilter: fix the race when initializing nf_ct_expect_hash_rnd
  ipv4: IP defragmentation must be ECN aware
  net: r6040: Return proper error for r6040_init_one
  dcb: use after free in dcb_flushapp()
  dcb: unlock on error in dcbnl_ieee_get()
  net: ixp4xx_eth: Return proper error for eth_init_one
  include/linux/if_ether.h: Add #define ETH_P_LINK_CTL for HPNA and wlan local tunnel
  net: add POLLPRI to sock_def_readable()
  af_unix: Avoid socket->sk NULL OOPS in stream connect security hooks.
  net_sched: pfifo_head_drop problem
  mac80211: remove stray extern
  mac80211: implement off-channel TX using hw r-o-c offload
  mac80211: implement hardware offload for remain-on-channel
  ...
Committed by Linus Torvalds, 2011-01-06 12:30:19 -08:00 (commit abb359450f)
1108 changed files with 147656 additions and 78421 deletions

@@ -0,0 +1,14 @@
What: /sys/class/net/<iface>/batman-adv/mesh_iface
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
The /sys/class/net/<iface>/batman-adv/mesh_iface file
displays the batman mesh interface this <iface>
is currently associated with.
What: /sys/class/net/<iface>/batman-adv/iface_status
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
Indicates the status of <iface> as it is seen by batman.

@@ -0,0 +1,69 @@
What: /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
Indicates whether the batman protocol messages of the
mesh <mesh_iface> shall be aggregated or not.
What: /sys/class/net/<mesh_iface>/mesh/bonding
Date: June 2010
Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
Description:
Indicates whether the data traffic going through the
mesh will be sent using multiple interfaces at the
same time (if available).
What: /sys/class/net/<mesh_iface>/mesh/fragmentation
Date: October 2010
Contact: Andreas Langer <an.langer@gmx.de>
Description:
Indicates whether the data traffic going through the
mesh will be fragmented or silently discarded if the
packet size exceeds the outgoing interface MTU.
What: /sys/class/net/<mesh_iface>/mesh/gw_bandwidth
Date: October 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
Defines the bandwidth which is propagated by this
node if gw_mode was set to 'server'.
What: /sys/class/net/<mesh_iface>/mesh/gw_mode
Date: October 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
Defines the state of the gateway features. Can be
either 'off', 'client' or 'server'.
What: /sys/class/net/<mesh_iface>/mesh/gw_sel_class
Date: October 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
Defines the selection criteria this node will use
to choose a gateway if gw_mode was set to 'client'.
What: /sys/class/net/<mesh_iface>/mesh/orig_interval
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
Defines the interval in milliseconds in which batman
sends its protocol messages.
What: /sys/class/net/<mesh_iface>/mesh/hop_penalty
Date: Oct 2010
Contact: Linus Lüssing <linus.luessing@web.de>
Description:
Defines the penalty which will be applied to an
originator message's tq-field on every hop.
What: /sys/class/net/<mesh_iface>/mesh/vis_mode
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
Description:
Each batman node only maintains information about its
own local neighborhood, therefore generating graphs
showing the topology of the entire mesh is not easily
feasible without having a central instance to collect
the local topologies from all nodes. This file allows
activating the collecting (server) mode.
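
Because all of the tunables above are plain sysfs text files, they
can be driven from any language. The following C sketch reads
orig_interval for a mesh interface named bat0 (an assumption) and
doubles it; the paths follow the ABI entries above, and error
handling is kept minimal:

  /* Minimal sketch: read bat0's originator interval and double it.
   * Interface name and error handling are illustrative only. */
  #include <stdio.h>

  int main(void)
  {
      const char *path = "/sys/class/net/bat0/mesh/orig_interval";
      unsigned int ms;
      FILE *f;

      f = fopen(path, "r");
      if (!f) { perror(path); return 1; }
      if (fscanf(f, "%u", &ms) != 1) { fclose(f); return 1; }
      fclose(f);

      f = fopen(path, "w");
      if (!f) { perror(path); return 1; }
      fprintf(f, "%u\n", ms * 2);      /* e.g. 1000 -> 2000 ms */
      fclose(f);
      return 0;
  }
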

@@ -146,6 +146,7 @@
!Finclude/net/cfg80211.h cfg80211_rx_mgmt
!Finclude/net/cfg80211.h cfg80211_mgmt_tx_status
!Finclude/net/cfg80211.h cfg80211_cqm_rssi_notify
!Finclude/net/cfg80211.h cfg80211_cqm_pktloss_notify
!Finclude/net/cfg80211.h cfg80211_michael_mic_failure
</chapter>
<chapter>
@@ -332,10 +333,16 @@
<title>functions/definitions</title>
!Finclude/net/mac80211.h ieee80211_rx_status
!Finclude/net/mac80211.h mac80211_rx_flags
!Finclude/net/mac80211.h mac80211_tx_control_flags
!Finclude/net/mac80211.h mac80211_rate_control_flags
!Finclude/net/mac80211.h ieee80211_tx_rate
!Finclude/net/mac80211.h ieee80211_tx_info
!Finclude/net/mac80211.h ieee80211_tx_info_clear_status
!Finclude/net/mac80211.h ieee80211_rx
!Finclude/net/mac80211.h ieee80211_rx_ni
!Finclude/net/mac80211.h ieee80211_rx_irqsafe
!Finclude/net/mac80211.h ieee80211_tx_status
!Finclude/net/mac80211.h ieee80211_tx_status_ni
!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
!Finclude/net/mac80211.h ieee80211_rts_get
!Finclude/net/mac80211.h ieee80211_rts_duration
@@ -346,6 +353,7 @@
!Finclude/net/mac80211.h ieee80211_stop_queue
!Finclude/net/mac80211.h ieee80211_wake_queues
!Finclude/net/mac80211.h ieee80211_stop_queues
!Finclude/net/mac80211.h ieee80211_queue_stopped
</sect1>
</chapter>
@@ -354,6 +362,13 @@
!Pinclude/net/mac80211.h Frame filtering
!Finclude/net/mac80211.h ieee80211_filter_flags
</chapter>
<chapter id="workqueue">
<title>The mac80211 workqueue</title>
!Pinclude/net/mac80211.h mac80211 workqueue
!Finclude/net/mac80211.h ieee80211_queue_work
!Finclude/net/mac80211.h ieee80211_queue_delayed_work
</chapter>
</part>
<part id="advanced">
@@ -374,6 +389,9 @@
!Finclude/net/mac80211.h set_key_cmd
!Finclude/net/mac80211.h ieee80211_key_conf
!Finclude/net/mac80211.h ieee80211_key_flags
!Finclude/net/mac80211.h ieee80211_tkip_key_type
!Finclude/net/mac80211.h ieee80211_get_tkip_key
!Finclude/net/mac80211.h ieee80211_key_removed
</chapter>
<chapter id="powersave">
@@ -417,6 +435,18 @@
supported by mac80211, add notes about supporting hw crypto
with it.
</para>
!Finclude/net/mac80211.h ieee80211_iterate_active_interfaces
!Finclude/net/mac80211.h ieee80211_iterate_active_interfaces_atomic
</chapter>
<chapter id="station-handling">
<title>Station handling</title>
<para>TODO</para>
!Finclude/net/mac80211.h ieee80211_sta
!Finclude/net/mac80211.h sta_notify_cmd
!Finclude/net/mac80211.h ieee80211_find_sta
!Finclude/net/mac80211.h ieee80211_find_sta_by_ifaddr
!Finclude/net/mac80211.h ieee80211_sta_block_awake
</chapter>
<chapter id="hardware-scan-offload">
@@ -424,6 +454,28 @@
<para>TBD</para>
!Finclude/net/mac80211.h ieee80211_scan_completed
</chapter>
<chapter id="aggregation">
<title>Aggregation</title>
<sect1>
<title>TX A-MPDU aggregation</title>
!Pnet/mac80211/agg-tx.c TX A-MPDU aggregation
!Cnet/mac80211/agg-tx.c
</sect1>
<sect1>
<title>RX A-MPDU aggregation</title>
!Pnet/mac80211/agg-rx.c RX A-MPDU aggregation
!Cnet/mac80211/agg-rx.c
</sect1>
!Finclude/net/mac80211.h ieee80211_ampdu_mlme_action
</chapter>
<chapter id="smps">
<title>Spatial Multiplexing Powersave (SMPS)</title>
!Pinclude/net/mac80211.h Spatial multiplexing power save
!Finclude/net/mac80211.h ieee80211_request_smps
!Finclude/net/mac80211.h ieee80211_smps_mode
</chapter>
</part>
<part id="rate-control">
@@ -435,9 +487,16 @@
interface and how it relates to mac80211 and drivers.
</para>
</partintro>
<chapter id="dummy">
<title>dummy chapter</title>
<chapter id="ratecontrol-api">
<title>Rate Control API</title>
<para>TBD</para>
!Finclude/net/mac80211.h ieee80211_start_tx_ba_session
!Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe
!Finclude/net/mac80211.h ieee80211_stop_tx_ba_session
!Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe
!Finclude/net/mac80211.h rate_control_changed
!Finclude/net/mac80211.h ieee80211_tx_rate_control
!Finclude/net/mac80211.h rate_control_send_low
</chapter>
</part>
@@ -485,6 +544,13 @@
</sect1>
</chapter>
<chapter id="aggregation-internals">
<title>Aggregation</title>
!Fnet/mac80211/sta_info.h sta_ampdu_mlme
!Fnet/mac80211/sta_info.h tid_ampdu_tx
!Fnet/mac80211/sta_info.h tid_ampdu_rx
</chapter>
<chapter id="synchronisation">
<title>Synchronisation</title>
<para>TBD</para>

@@ -0,0 +1,327 @@
Copyright (c) 2009-2010 QLogic Corporation
QLogic Linux qlcnic NIC Driver
This program includes a device driver for Linux 2.6 that may be
distributed with QLogic hardware specific firmware binary file.
You may modify and redistribute the device driver code under the
GNU General Public License (a copy of which is attached hereto as
Exhibit A) published by the Free Software Foundation (version 2).
You may redistribute the hardware specific firmware binary file
under the following terms:
1. Redistribution of source code (only if applicable),
must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistribution in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
3. The name of QLogic Corporation may not be used to
endorse or promote products derived from this software
without specific prior written permission
REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
COMBINATION WITH THIS PROGRAM.
EXHIBIT A
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

@@ -0,0 +1,240 @@
[state: 21-11-2010]
BATMAN-ADV
----------
Batman advanced is a new approach to wireless networking which
no longer operates on the basis of IP. Unlike the batman daemon,
which exchanges information using UDP packets and sets routing
tables, batman-advanced operates on ISO/OSI Layer 2 only and uses
and routes (or better: bridges) Ethernet Frames. It emulates a
virtual network switch of all nodes participating. Therefore all
nodes appear to be link local, thus all higher operating proto-
cols won't be affected by any changes within the network. You can
run almost any protocol above batman advanced, prominent examples
are: IPv4, IPv6, DHCP, IPX.
Batman advanced was implemented as a Linux kernel driver to re-
duce the overhead to a minimum. It does not depend on any (other)
network driver, and can be used on wifi as well as ethernet lan,
vpn, etc ... (anything with ethernet-style layer 2).
CONFIGURATION
-------------
Load the batman-adv module into your kernel:
# insmod batman-adv.ko
The module is now waiting for activation. You must add some in-
terfaces on which batman can operate. After loading the module
batman advanced will scan your system's interfaces to search for
compatible interfaces. Once found, it will create subfolders in
the /sys directories of each supported interface, e.g.
# ls /sys/class/net/eth0/batman_adv/
# iface_status mesh_iface
If an interface does not have the "batman_adv" subfolder it prob-
ably is not supported. Unsupported interfaces are: loopback,
non-ethernet and batman's own interfaces.
Note: After the module has been loaded, it will continuously watch
for new interfaces to verify their compatibility. There is no need to
reload the module if you plug your USB wifi adapter into your ma-
chine after batman advanced was initially loaded.
To activate a given interface simply write "bat0" into its
"mesh_iface" file inside the batman_adv subfolder:
# echo bat0 > /sys/class/net/eth0/batman_adv/mesh_iface
Repeat this step for all interfaces you wish to add. Now batman
starts using/broadcasting on this/these interface(s).
By reading the "iface_status" file you can check its status:
# cat /sys/class/net/eth0/batman_adv/iface_status
# active
To deactivate an interface you have to write "none" into its
"mesh_iface" file:
# echo none > /sys/class/net/eth0/batman_adv/mesh_iface
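
The same activation can be done programmatically, since mesh_iface
and iface_status are ordinary sysfs files. A minimal C sketch,
assuming the interface is eth0 and using only the paths documented
above:

  /* Minimal sketch: add eth0 to the bat0 mesh, then print its status. */
  #include <stdio.h>

  static int write_str(const char *path, const char *val)
  {
      FILE *f = fopen(path, "w");

      if (!f) { perror(path); return -1; }
      fputs(val, f);
      return fclose(f);
  }

  int main(void)
  {
      char status[64];
      FILE *f;

      if (write_str("/sys/class/net/eth0/batman_adv/mesh_iface",
                    "bat0\n"))
          return 1;

      f = fopen("/sys/class/net/eth0/batman_adv/iface_status", "r");
      if (!f) { perror("iface_status"); return 1; }
      if (fgets(status, sizeof(status), f))
          printf("eth0: %s", status);   /* expected: "active" */
      fclose(f);
      return 0;
  }
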
All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
# aggregated_ogms bonding fragmentation orig_interval
# vis_mode
There is a special folder for debugging information:
# ls /sys/kernel/debug/batman_adv/bat0/
# originators socket transtable_global transtable_local
# vis_data
Some of the files contain all sorts of status information regard-
ing the mesh network. For example, you can view the table of
originators (mesh participants) with:
# cat /sys/kernel/debug/batman_adv/bat0/originators
Other files let you change batman's behaviour to better fit your
requirements. For instance, you can check the current originator
interval (value in milliseconds which determines how often batman
sends its broadcast packets):
# cat /sys/class/net/bat0/mesh/orig_interval
# 1000
and also change its value:
# echo 3000 > /sys/class/net/bat0/mesh/orig_interval
In very mobile scenarios, you might want to adjust the originator
interval to a lower value. This will make the mesh more respon-
sive to topology changes, but will also increase the overhead.
USAGE
-----
To make use of your newly created mesh, batman advanced provides
a new interface "bat0" which you should use from this point on.
The individual interfaces added to batman advanced are no longer
relevant because batman handles them for you. Basically, one "hands
over" the data by using the batman interface and batman will make
sure it reaches its destination.
The "bat0" interface can be used like any other regular inter-
face. It needs an IP address which can be either statically con-
figured or dynamically (by using DHCP or similar services):
# NodeA: ifconfig bat0 192.168.0.1
# NodeB: ifconfig bat0 192.168.0.2
# NodeB: ping 192.168.0.1
Note: In order to avoid problems remove all IP addresses previ-
ously assigned to interfaces now used by batman advanced, e.g.
# ifconfig eth0 0.0.0.0
VISUALIZATION
-------------
If you want topology visualization, at least one mesh node must
be configured as VIS-server:
# echo "server" > /sys/class/net/bat0/mesh/vis_mode
Each node is either configured as "server" or as "client" (de-
fault: "client"). Clients send their topology data to the server
next to them, and servers synchronize with each other. If there
is no server configured (default) within the mesh, no topology
information will be transmitted. With these "synchronizing
servers", there can be 1 or more vis servers sharing the same (or
at least very similar) data.
When configured as server, you can get a topology snapshot of
your mesh:
# cat /sys/kernel/debug/batman_adv/bat0/vis_data
This raw output is intended to be easily parsable and convertible
with other tools. Have a look at the batctl README if you want,
for instance, vis output in dot or json format, and to see how
those outputs can then be visualised in an image.
The raw format consists of comma separated values per entry where
each entry gives information about a certain source interface.
Each entry can (or, for mandatory fields, must) have the
following values:
-> "mac" - mac address of an originator's source interface
(each line begins with it)
-> "TQ mac value" - src mac's link quality towards mac address
of a neighbor originator's interface which
is being used for routing
-> "HNA mac" - HNA announced by source mac
-> "PRIMARY" - this is a primary interface
-> "SEC mac" - secondary mac address of source
(requires preceding PRIMARY)
The TQ value has a range from 4 to 255 with 255 being the best.
The HNA entries show which hosts are connected to the mesh via
bat0 or are being bridged into the mesh network. The PRIMARY/SEC
values are only applied on primary interfaces.
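
For tools that cannot call batctl, the raw format can be split by
hand on the commas. A small C sketch; the sample entry is
hypothetical, constructed only to match the field description above:

  /* Minimal sketch: split one raw vis_data entry into its fields.
   * The sample line is made up to match the description above. */
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      char line[] = "fe:fe:00:00:01:01,"
                    "TQ fe:fe:00:00:02:01 245,"
                    "HNA 00:11:22:33:44:55,"
                    "PRIMARY";
      char *tok;

      for (tok = strtok(line, ","); tok; tok = strtok(NULL, ","))
          printf("field: %s\n", tok);
      return 0;
  }
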
LOGGING/DEBUGGING
-----------------
All error messages, warnings and information messages are sent to
the kernel log. Depending on your operating system distribution
this can be read in one of a number of ways. Try using the com-
mands: dmesg, logread, or looking in the files /var/log/kern.log
or /var/log/syslog. All batman-adv messages are prefixed with
"batman-adv:" So to see just these messages try
# dmesg | grep batman-adv
When investigating problems with your mesh network it is some-
times necessary to see more detailed debug messages. This must
be enabled when compiling the batman-adv module. When building
batman-adv as part of the kernel, use "make menuconfig" and
enable the option "B.A.T.M.A.N. debugging".
Those additional debug messages can be accessed using a special
file in debugfs:
# cat /sys/kernel/debug/batman_adv/bat0/log
The additional debug output is by default disabled. It can be
enabled at run time. The following log_levels are defined:
0 - All debug output disabled
1 - Enable messages related to routing / flooding / broadcasting
2 - Enable route or hna added / changed / deleted
3 - Enable all messages
The debug output can be changed at runtime using the file
/sys/class/net/bat0/mesh/log_level, e.g.
# echo 2 > /sys/class/net/bat0/mesh/log_level
will enable debug messages for when routes or HNAs change.
BATCTL
------
As batman advanced operates on layer 2, all hosts participating
in the virtual switch are completely transparent to all protocols
above layer 2. Therefore the common diagnosis tools do not work
as expected. To overcome these problems, batctl was created. At
the moment batctl contains ping, traceroute, tcpdump and
interfaces to the kernel module settings.
For more information, please see the manpage (man batctl).
batctl is available on http://www.open-mesh.org/
CONTACT
-------
Please send us comments, experiences, questions, anything :)
IRC: #batman on irc.freenode.org
Mailing-list: b.a.t.m.a.n@lists.open-mesh.org
(optional subscription at
https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
You can also contact the authors:
Marek Lindner <lindner_marek@yahoo.de>
Simon Wunderlich <siwu@hrz.tu-chemnitz.de>

@@ -47,6 +47,26 @@ http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
Socket options
==============
DCCP_SOCKOPT_QPOLICY_ID sets the dequeuing policy for outgoing packets. It takes
a policy ID as argument and can only be set before the connection (i.e. changes
during an established connection are not supported). Currently, two policies are
defined: the "simple" policy (DCCPQ_POLICY_SIMPLE), which does nothing special,
and a priority-based variant (DCCPQ_POLICY_PRIO). The latter allows passing a
u32 priority value as ancillary data to sendmsg(), where higher numbers indicate
a higher packet priority (similar to SO_PRIORITY). This ancillary data needs to
a higher packet priority (similar to SO_PRIORITY). This ancillary data needs to
be formatted using a cmsg(3) message header filled in as follows:
cmsg->cmsg_level = SOL_DCCP;
cmsg->cmsg_type = DCCP_SCM_PRIORITY;
cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t)); /* or CMSG_LEN(4) */
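
Put together, a send path using this ancillary data might look like the
sketch below. It assumes <linux/dccp.h> exports DCCP_SCM_PRIORITY as
described, defines SOL_DCCP as a fallback in case the libc headers lack
it, and expects sock to be a connected DCCP socket on which
DCCP_SOCKOPT_QPOLICY_ID was set to DCCPQ_POLICY_PRIO:

  /* Sketch: send one DCCP packet with a given priority under
   * DCCPQ_POLICY_PRIO.  `sock` must already be connected. */
  #include <string.h>
  #include <stdint.h>
  #include <sys/socket.h>
  #include <linux/dccp.h>          /* DCCP_SCM_PRIORITY */

  #ifndef SOL_DCCP
  #define SOL_DCCP 269             /* value from <linux/socket.h> */
  #endif

  static ssize_t dccp_send_prio(int sock, const void *buf, size_t len,
                                uint32_t prio)
  {
      struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
      char cbuf[CMSG_SPACE(sizeof(uint32_t))];
      struct msghdr msg = { 0 };
      struct cmsghdr *cmsg;

      msg.msg_iov = &iov;
      msg.msg_iovlen = 1;
      msg.msg_control = cbuf;
      msg.msg_controllen = sizeof(cbuf);

      cmsg = CMSG_FIRSTHDR(&msg);
      cmsg->cmsg_level = SOL_DCCP;
      cmsg->cmsg_type = DCCP_SCM_PRIORITY;
      cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
      memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));

      return sendmsg(sock, &msg, 0);
  }
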
DCCP_SOCKOPT_QPOLICY_TXQLEN sets the maximum length of the output queue. A zero
value is always interpreted as unbounded queue length. If different from zero,
the interpretation of this parameter depends on the current dequeuing policy
(see above): the "simple" policy will enforce a fixed queue size by returning
EAGAIN, whereas the "prio" policy enforces a fixed queue length by dropping the
lowest-priority packet first. The default value for this parameter is
initialised from /proc/sys/net/dccp/default/tx_qlen.
DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
the socket will fall back to 0 (which means that no meaningful service code

@@ -72,7 +72,7 @@ Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data
ethtool -G eth? tx n, where n is the number of desired tx descriptors.
Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by
default. Ethtool can be used as follows to force speed/duplex.
default. The ethtool utility can be used as follows to force speed/duplex.
ethtool -s eth? autoneg off speed {10|100} duplex {full|half}
@@ -126,30 +126,21 @@ Additional Configurations
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. Ethtool
diagnostics, as well as displaying statistical information. The ethtool
version 1.6 or later is required for this functionality.
The latest release of ethtool can be found from
http://sourceforge.net/projects/gkernel.
NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
for a more complete ethtool feature set can be enabled by upgrading
ethtool to ethtool-1.8.1.
http://ftp.kernel.org/pub/software/network/ethtool/
Enabling Wake on LAN* (WoL)
---------------------------
WoL is provided through the Ethtool* utility. Ethtool is included with Red
Hat* 8.0. For other Linux distributions, download and install Ethtool from
the following website: http://sourceforge.net/projects/gkernel.
For instructions on enabling WoL with Ethtool, refer to the Ethtool man page.
WoL is provided through the ethtool* utility. For instructions on enabling
WoL with ethtool, refer to the ethtool man page.
WoL will be enabled on the system during the next shut down or reboot. For
this driver version, in order to enable WoL, the e100 driver must be
loaded when shutting down or rebooting the system.
NAPI
----

@@ -79,7 +79,7 @@ InterruptThrottleRate
---------------------
(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
4=simplified balancing)
Default Value: 3
The driver can limit the amount of interrupts per second that the adapter
@@ -124,8 +124,8 @@ InterruptThrottleRate is set to mode 1. In this mode, which operates
the same as mode 3, the InterruptThrottleRate will be increased stepwise to
70000 for traffic in class "Lowest latency".
In simplified mode the interrupt rate is based on the ratio of Tx and
Rx traffic. If the bytes per second rate is approximately equal, the
In simplified mode the interrupt rate is based on the ratio of TX and
RX traffic. If the bytes per second rate is approximately equal, the
interrupt rate will drop as low as 2000 interrupts per second. If the
traffic is mostly transmit or mostly receive, the interrupt rate could
be as high as 8000.
@@ -245,7 +245,7 @@ NOTE: Depending on the available system resources, the request for a
TxDescriptorStep
----------------
Valid Range: 1 (use every Tx Descriptor)
4 (use every 4th Tx Descriptor)
Default Value: 1 (use every Tx Descriptor)
@@ -312,7 +312,7 @@ Valid Range: 0-xxxxxxx (0=off)
Default Value: 256
Usage: insmod e1000.ko copybreak=128
Driver copies all packets below or equaling this size to a fresh Rx
Driver copies all packets below or equaling this size to a fresh RX
buffer before handing it up the stack.
This parameter is different than other parameters, in that it is a
@@ -431,15 +431,15 @@ Additional Configurations
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. Ethtool
diagnostics, as well as displaying statistical information. The ethtool
version 1.6 or later is required for this functionality.
The latest release of ethtool can be found from
http://sourceforge.net/projects/gkernel.
http://ftp.kernel.org/pub/software/network/ethtool/
Enabling Wake on LAN* (WoL)
---------------------------
WoL is configured through the Ethtool* utility.
WoL is configured through the ethtool* utility.
WoL will be enabled on the system during the next shut down or reboot.
For this driver version, in order to enable WoL, the e1000 driver must be

@@ -1,5 +1,5 @@
Linux* Driver for Intel(R) Network Connection
===============================================================
=============================================
Intel Gigabit Linux driver.
Copyright(c) 1999 - 2010 Intel Corporation.
@@ -61,6 +61,12 @@ per second, even if more packets have come in. This reduces interrupt
load on the system and can lower CPU utilization under heavy load,
but will increase latency as packets are not processed as quickly.
The default behaviour of the driver previously assumed a static
InterruptThrottleRate value of 8000, providing a good fallback value for
all traffic types, but at the cost of poor small packet performance and
latency. The hardware can handle many more small packets per second,
however, and for this reason an adaptive interrupt moderation algorithm
was implemented.
The driver has two adaptive modes (setting 1 or 3) in which
it dynamically adjusts the InterruptThrottleRate value based on the traffic
that it receives. After determining the type of incoming traffic in the last
@@ -86,8 +92,8 @@ InterruptThrottleRate is set to mode 1. In this mode, which operates
the same as mode 3, the InterruptThrottleRate will be increased stepwise to
70000 for traffic in class "Lowest latency".
In simplified mode the interrupt rate is based on the ratio of Tx and
Rx traffic. If the bytes per second rate is approximately equal the
In simplified mode the interrupt rate is based on the ratio of TX and
RX traffic. If the bytes per second rate is approximately equal, the
interrupt rate will drop as low as 2000 interrupts per second. If the
traffic is mostly transmit or mostly receive, the interrupt rate could
be as high as 8000.
@@ -177,7 +183,7 @@ Copybreak
Valid Range: 0-xxxxxxx (0=off)
Default Value: 256
Driver copies all packets below or equaling this size to a fresh Rx
Driver copies all packets below or equaling this size to a fresh RX
buffer before handing it up the stack.
This parameter is different than other parameters, in that it is a
@@ -223,17 +229,17 @@ loading or enabling the driver, try disabling this feature.
WriteProtectNVM
---------------
Valid Range: 0-1
Default Value: 1 (enabled)
Valid Range: 0,1
Default Value: 1
Set the hardware to ignore all write/erase cycles to the GbE region in the
ICHx NVM (non-volatile memory). This feature can be disabled by the
WriteProtectNVM module parameter (enabled by default) only after a hardware
reset, but the machine must be power cycled before trying to enable writes.
Note: the kernel boot option iomem=relaxed may need to be set if the kernel
config option CONFIG_STRICT_DEVMEM=y, if the root user wants to write the
NVM from user space via ethtool.
If set to 1, configure the hardware to ignore all write/erase cycles to the
GbE region in the ICHx NVM (in order to prevent accidental corruption of the
NVM). This feature can be disabled by setting the parameter to 0 during initial
driver load.
NOTE: The machine must be power cycled (full off/on) when enabling NVM writes
via setting the parameter to zero. Once the NVM has been locked (via the
parameter at 1 when the driver loads) it cannot be unlocked except via power
cycle.
Additional Configurations
=========================
@@ -259,32 +265,30 @@ Additional Configurations
- Some adapters limit Jumbo Frames sized packets to a maximum of
4096 bytes and some adapters do not support Jumbo Frames.
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. We
strongly recommend downloading the latest version of Ethtool at:
strongly recommend downloading the latest version of ethtool at:
http://sourceforge.net/projects/gkernel.
http://ftp.kernel.org/pub/software/network/ethtool/
Speed and Duplex
----------------
Speed and Duplex are configured through the Ethtool* utility. For
instructions, refer to the Ethtool man page.
Speed and Duplex are configured through the ethtool* utility. For
instructions, refer to the ethtool man page.
Enabling Wake on LAN* (WoL)
---------------------------
WoL is configured through the Ethtool* utility. For instructions on
enabling WoL with Ethtool, refer to the Ethtool man page.
WoL is configured through the ethtool* utility. For instructions on
enabling WoL with ethtool, refer to the ethtool man page.
WoL will be enabled on the system during the next shut down or reboot.
For this driver version, in order to enable WoL, the e1000e driver must be
loaded when shutting down or rebooting the system.
In most cases Wake On LAN is only supported on port A for multiple port
adapters. To verify if a port supports Wake on LAN run ethtool eth<X>.
Support
=======

@@ -36,6 +36,7 @@ Default Value: 0
This parameter adds support for SR-IOV. It causes the driver to spawn up to
max_vfs worth of virtual functions.
Additional Configurations
=========================
@@ -60,15 +61,16 @@ Additional Configurations
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information.
diagnostics, as well as displaying statistical information. The latest
version of ethtool can be found at:
http://sourceforge.net/projects/gkernel.
http://ftp.kernel.org/pub/software/network/ethtool/
Enabling Wake on LAN* (WoL)
---------------------------
WoL is configured through the Ethtool* utility.
WoL is configured through the ethtool* utility.
For instructions on enabling WoL with Ethtool, refer to the Ethtool man page.
For instructions on enabling WoL with ethtool, refer to the ethtool man page.
WoL will be enabled on the system during the next shut down or reboot.
For this driver version, in order to enable WoL, the igb driver must be
@@ -91,31 +93,6 @@ Additional Configurations
REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not
found, the system will fall back to MSI or to legacy interrupts.
LRO
---
Large Receive Offload (LRO) is a technique for increasing inbound throughput
of high-bandwidth network connections by reducing CPU overhead. It works by
aggregating multiple incoming packets from a single stream into a larger
buffer before they are passed higher up the networking stack, thus reducing
the number of packets that have to be processed. LRO combines multiple
Ethernet frames into a single receive in the stack, thereby potentially
decreasing CPU utilization for receives.
NOTE: You need to have inet_lro enabled via either the CONFIG_INET_LRO or
CONFIG_INET_LRO_MODULE kernel config option. Additionally, if
CONFIG_INET_LRO_MODULE is used, the inet_lro module needs to be loaded
before the igb driver.
You can verify that the driver is using LRO by looking at these counters in
Ethtool:
lro_aggregated - count of total packets that were combined
lro_flushed - counts the number of packets flushed out of LRO
lro_no_desc - counts the number of times an LRO descriptor was not available
for the LRO packet
NOTE: IPv6 and UDP are not supported by LRO.
Support
=======

@@ -58,9 +58,11 @@ Additional Configurations
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information.
diagnostics, as well as displaying statistical information. The ethtool
version 3.0 or later is required for this functionality, although we
strongly recommend downloading the latest version at:
http://sourceforge.net/projects/gkernel.
http://ftp.kernel.org/pub/software/network/ethtool/
Support
=======

@@ -11,7 +11,9 @@ ip_forward - BOOLEAN
for routers)
ip_default_ttl - INTEGER
default 64
Default value of TTL field (Time To Live) for outgoing (but not
forwarded) IP packets. Should be between 1 and 255 inclusive.
Default: 64 (as recommended by RFC1700)
ip_no_pmtu_disc - BOOLEAN
Disable Path MTU Discovery.
@@ -708,10 +710,28 @@ igmp_max_memberships - INTEGER
Change the maximum number of multicast groups we can subscribe to.
Default: 20
conf/interface/* changes special settings per interface (where "interface" is
the name of your network interface)
conf/all/* is special, changes the settings for all interfaces
Theoretical maximum value is bounded by having to send a membership
report in a single datagram (i.e. the report can't span multiple
datagrams, or risk confusing the switch and leaving groups you don't
intend to).
The number of supported groups 'M' is bounded by the number of group
report entries you can fit into a single datagram of 65535 bytes.
M = (65535 - sizeof(IP header)) / sizeof(Group record)
Group records are variable length, with a minimum of 12 bytes.
So net.ipv4.igmp_max_memberships should not be set higher than:
(65535 - 24) / 12 = 5459
The value 5459 assumes no IP header options, so in practice
this number may be lower.
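
For illustration only, the bound can be reproduced mechanically; the
constants mirror the text above (24 bytes of IP header as assumed
there, 12 bytes minimum per group record):

  /* Reproduce the membership bound from the text above. */
  #include <stdio.h>

  int main(void)
  {
      const int max_dgram = 65535;  /* maximum IPv4 datagram size    */
      const int ip_hdr = 24;        /* IP header size assumed above  */
      const int grec_min = 12;      /* minimum group record size     */

      printf("max memberships <= %d\n",
             (max_dgram - ip_hdr) / grec_min);   /* prints 5459 */
      return 0;
  }
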
conf/interface/* changes special settings per interface (where
"interface" is the name of your network interface)
conf/all/* is special, changes the settings for all interfaces
log_martians - BOOLEAN
Log packets with impossible addresses to kernel log.

@@ -309,15 +309,15 @@ Additional Configurations
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. Ethtool
diagnostics, as well as displaying statistical information. The ethtool
version 1.6 or later is required for this functionality.
The latest release of ethtool can be found from
http://sourceforge.net/projects/gkernel
http://ftp.kernel.org/pub/software/network/ethtool/
NOTE: Ethtool 1.6 only supports a limited set of ethtool options. Support
for a more complete ethtool feature set can be enabled by upgrading
to the latest version.
NOTE: The ethtool version 1.6 only supports a limited set of ethtool options.
Support for a more complete ethtool feature set can be enabled by
upgrading to the latest version.
NAPI

@@ -1,107 +1,126 @@
Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection
========================================================================
March 10, 2009
Intel Gigabit Linux driver.
Copyright(c) 1999 - 2010 Intel Corporation.
Contents
========
- In This Release
- Identifying Your Adapter
- Building and Installation
- Additional Configurations
- Performance Tuning
- Known Issues
- Support
In This Release
===============
This file describes the ixgbe Linux Base Driver for the 10 Gigabit PCI
Express Intel(R) Network Connection. This driver includes support for
Itanium(R)2-based systems.
For questions related to hardware requirements, refer to the documentation
supplied with your 10 Gigabit adapter. All hardware requirements listed apply
to use with Linux.
The following features are available in this kernel:
- Native VLANs
- Channel Bonding (teaming)
- SNMP
- Generic Receive Offload
- Data Center Bridging
Channel Bonding documentation can be found in the Linux kernel source:
/Documentation/networking/bonding.txt
Ethtool, lspci, and ifconfig can be used to display device and driver
specific information.
Identifying Your Adapter
========================
This driver supports devices based on the 82598 controller and the 82599
controller.
The driver in this release is compatible with 82598 and 82599-based Intel
Network Connections.
For specific information on identifying which adapter you have, please visit:
For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
http://support.intel.com/support/network/sb/CS-008441.htm
http://support.intel.com/support/network/sb/CS-012904.htm
SFP+ Devices with Pluggable Optics
----------------------------------
82599-BASED ADAPTERS
NOTES: If your 82599-based Intel(R) Network Adapter came with Intel optics, or
is an Intel(R) Ethernet Server Adapter X520-2, then it only supports Intel
optics and/or the direct attach cables listed below.
When 82599-based SFP+ devices are connected back to back, they should be set to
the same Speed setting via ethtool. Results may vary if you mix speed settings.
82598-based adapters support all passive direct attach cables that comply
with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
cables are not supported.
Supplier Type Part Numbers
SR Modules
Intel DUAL RATE 1G/10G SFP+ SR (bailed) FTLX8571D3BCV-IT
Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDDZ-IN1
Intel DUAL RATE 1G/10G SFP+ SR (bailed) AFBR-703SDZ-IN2
LR Modules
Intel DUAL RATE 1G/10G SFP+ LR (bailed) FTLX1471D3BCV-IT
Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDDZ-IN1
Intel DUAL RATE 1G/10G SFP+ LR (bailed) AFCT-701SDZ-IN2
The following is a list of 3rd party SFP+ modules and direct attach cables that
have received some testing. Not all modules are applicable to all devices.
Supplier Type Part Numbers
Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
Finisar DUAL RATE 1G/10G SFP+ SR (No Bail) FTLX8571D3QCV-IT
Avago DUAL RATE 1G/10G SFP+ SR (No Bail) AFBR-703SDZ-IN1
Finisar DUAL RATE 1G/10G SFP+ LR (No Bail) FTLX1471D3QCV-IT
Avago DUAL RATE 1G/10G SFP+ LR (No Bail) AFCT-701SDZ-IN1
Finistar 1000BASE-T SFP FCLF8522P2BTL
Avago 1000BASE-T SFP ABCU-5710RZ
82599-based adapters support all passive and active limiting direct attach
cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
Laser turns off for SFP+ when ifconfig down
-------------------------------------------
"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
"ifconfig up" turns on the later.
Building and Installation
=========================
82598-BASED ADAPTERS
select m for "Intel(R) 10GbE PCI Express adapters support" located at:
Location:
-> Device Drivers
-> Network device support (NETDEVICES [=y])
-> Ethernet (10000 Mbit) (NETDEV_10000 [=y])
NOTES for 82598-Based Adapters:
- Intel(R) Network Adapters that support removable optical modules only support
their original module type (i.e., the Intel(R) 10 Gigabit SR Dual Port
Express Module only supports SR optical modules). If you plug in a different
type of module, the driver will not load.
- Hot Swapping/hot plugging optical modules is not supported.
- Only single speed, 10 gigabit modules are supported.
- LAN on Motherboard (LOMs) may support DA, SR, or LR modules. Other module
types are not supported. Please see your system documentation for details.
1. make modules && make modules_install
The following is a list of 3rd party SFP+ modules and direct attach cables that
have received some testing. Not all modules are applicable to all devices.
2. Load the module:
Supplier Type Part Numbers
# modprobe ixgbe
Finisar SFP+ SR bailed, 10g single rate FTLX8571D3BCL
Avago SFP+ SR bailed, 10g single rate AFBR-700SDZ
Finisar SFP+ LR bailed, 10g single rate FTLX1471D3BCL
The insmod command can be used if the full
path to the driver module is specified. For example:
82598-based adapters support all passive direct attach cables that comply
with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
cables are not supported.
insmod /lib/modules/<KERNEL VERSION>/kernel/drivers/net/ixgbe/ixgbe.ko
With 2.6 based kernels, also make sure that older ixgbe drivers are
removed from the kernel before loading the new module:
Flow Control
------------
Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
receiving and transmitting pause frames for ixgbe. When TX is enabled, PAUSE
frames are generated when the receive packet buffer crosses a predefined
threshold. When RX is enabled, the transmit unit will halt for the time delay
specified when a PAUSE frame is received.
rmmod ixgbe; modprobe ixgbe
Flow Control is enabled by default. If you want to disable a flow control
capable link partner, use ethtool:
3. Assign an IP address to the interface by entering the following, where
x is the interface number:
ifconfig ethx <IP_address>
4. Verify that the interface works. Enter the following, where <IP_address>
is the IP address for another machine on the same subnet as the interface
that is being tested:
ping <IP_address>
ethtool -A eth? autoneg off rx off tx off
NOTE: For 82598 backplane cards entering 1 gig mode, flow control default
behavior is changed to off. Flow control in 1 gig mode on these devices can
lead to Tx hangs.
Additional Configurations
=========================
Viewing Link Messages
---------------------
Link messages will not be displayed to the console if the distribution is
restricting system messages. In order to see network driver link messages on
your console, set dmesg to eight by entering the following:
dmesg -n 8
NOTE: This setting is not saved across reboots.
Jumbo Frames
------------
The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
@@ -123,13 +142,8 @@ Additional Configurations
other protocols besides TCP. It's also safe to use with configurations that
are problematic for LRO, namely bridging and iSCSI.
GRO is enabled by default in the driver. Future versions of ethtool will
support disabling and re-enabling GRO on the fly.
Data Center Bridging, aka DCB
-----------------------------
DCB is a configuration Quality of Service implementation in hardware.
It uses the VLAN priority tag (802.1p) to filter traffic. That means
that there are 8 different priorities that traffic can be filtered into.
@@ -163,24 +177,71 @@ Additional Configurations
http://e1000.sf.net
Ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The latest
ethtool version is required for this functionality.
The latest release of ethtool can be found from
http://ftp.kernel.org/pub/software/network/ethtool/
FCoE
----
This release of the ixgbe driver contains new code to enable users to use
Fiber Channel over Ethernet (FCoE) and Data Center Bridging (DCB)
functionality that is supported by the 82598-based hardware. This code has
no default effect on the regular driver operation, and configuring DCB and
FCoE is outside the scope of this driver README. Refer to
http://www.open-fcoe.org/ for FCoE project information and contact
e1000-eedc@lists.sourceforge.net for DCB information.
NAPI
----
NAPI (Rx polling mode) is supported in the ixgbe driver. NAPI is enabled
by default in the driver.
See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
MAC and VLAN anti-spoofing feature
----------------------------------
When a malicious driver attempts to send a spoofed packet, it is dropped by
the hardware and not transmitted. An interrupt is sent to the PF driver
notifying it of the spoof attempt.
When a spoofed packet is detected, the PF driver will send the following
message to the system log (displayed by the "dmesg" command):
Spoof event(s) detected on VF (n)
Where n = the VF that attempted to do the spoofing.
Performance Tuning
==================
An excellent article on performance tuning can be found at:
http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
Known Issues
============
Enabling SR-IOV in a 32-bit Microsoft* Windows* Server 2008 Guest OS using
Intel(R) 82576-based GbE or Intel(R) 82599-based 10GbE controller under KVM
-----------------------------------------------------------------------------
KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
includes traditional PCIe devices, as well as SR-IOV-capable devices using
Intel 82576-based and 82599-based controllers.
While direct assignment of a PCIe device or an SR-IOV Virtual Function (VF)
to a Linux-based VM running 2.6.32 or later kernel works fine, there is a
known issue with Microsoft Windows Server 2008 VM that results in a "yellow
bang" error. This problem is within the KVM VMM itself, not the Intel driver,
or the SR-IOV logic of the VMM, but rather that KVM emulates an older CPU
model for the guests, and this older CPU model does not support MSI-X
interrupts, which is a requirement for Intel SR-IOV.
If you wish to use the Intel 82576 or 82599-based controllers in SR-IOV mode
with KVM and a Microsoft Windows Server 2008 guest, the workaround is to tell
KVM to emulate a different model of CPU when using qemu to create the KVM
guest:
"-cpu qemu64,model=13"
Support

View file

@ -35,10 +35,6 @@ Driver ID Guide at:
Known Issues/Troubleshooting
============================
Unloading Physical Function (PF) Driver Causes System Reboots When VM is
Running and VF is Loaded on the VM
------------------------------------------------------------------------
Do not unload the PF driver (ixgbe) while VFs are assigned to guests.
Support
=======

View file

@ -7,7 +7,7 @@ This is the driver for the MAC 10/100/1000 on-chip Ethernet controllers
(Synopsys IP blocks); it has been fully tested on STLinux platforms.
Currently this network device driver is for all STM embedded MAC/GMAC
(7xxx SoCs). Other platforms are starting to use it as well, e.g. ARM SPEAr.
DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
Universal version 4.0 have been used for developing the first code
@ -95,9 +95,14 @@ Several information came from the platform; please refer to the
driver's Header file in include/linux directory.
struct plat_stmmacenet_data {
int bus_id;
int pbl;
int clk_csr;
int has_gmac;
int enh_desc;
int tx_coe;
int bugged_jumbo;
int pmt;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(unsigned long ioaddr);
#ifdef CONFIG_STM_DRIVERS
@ -114,6 +119,12 @@ Where:
registers (on STM platforms);
- has_gmac: GMAC core is on board (get it at run-time in the next step);
- bus_id: bus identifier.
- tx_coe: core is able to perform the tx csum in HW.
- enh_desc: if set, the MAC will use the enhanced descriptor structure.
- clk_csr: CSR Clock range selection.
- bugged_jumbo: some HWs are not able to perform the csum in HW for
over-sized frames due to limited buffer sizes. If this flag is set,
the csum will be done in SW on JUMBO frames.
struct plat_stmmacphy_data {
int bus_id;
@ -131,13 +142,28 @@ Where:
- interface: physical MII interface mode;
- phy_reset: hook to reset HW function.
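As an illustration, board support code would typically fill these in along
the following lines. This is a hypothetical sketch: the values, names, and
the phy_addr field are assumptions for the example, not taken from a real
board file.
#include <linux/phy.h>
#include <linux/platform_device.h>
/* the plat_* structs come from the driver header named in the text above */
static struct plat_stmmacenet_data mb_eth_data = {
	.bus_id = 0,
	.pbl = 32,		/* DMA programmable burst length */
	.has_gmac = 1,		/* GMAC core is on board */
	.enh_desc = 1,		/* GMAC uses the enhanced descriptors */
	.tx_coe = 1,		/* tx csum is done in HW */
};
static struct plat_stmmacphy_data mb_phy_data = {
	.bus_id = 0,
	.phy_addr = 1,		/* assumed field/value for this sketch */
	.interface = PHY_INTERFACE_MODE_MII,
};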
SOURCES:
- Kconfig
- Makefile
- stmmac_main.c: main network device driver;
- stmmac_mdio.c: mdio functions;
- stmmac_ethtool.c: ethtool support;
- stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
(only tested on ST40-based platforms);
- stmmac.h: private driver structure;
- common.h: common definitions and VFTs;
- descs.h: descriptor structure definitions;
- dwmac1000_core.c: GMAC core functions;
- dwmac1000_dma.c: dma functions for the GMAC chip;
- dwmac1000.h: specific header file for the GMAC;
- dwmac100_core.c: MAC 100 core and dma code;
- dwmac100_dma.c: dma functions for the MAC chip;
- dwmac100.h: specific header file for the MAC;
- dwmac_lib.c: generic DMA functions shared among chips
- enh_desc.c: functions for handling enhanced descriptors
- norm_desc.c: functions for handling normal descriptors
TODO:
- Continue to make the driver more generic and suitable for other Synopsys
Ethernet controllers used on other architectures (e.g. ARM).
- 10G controllers are not supported.
- MAC uses Normal descriptors and GMAC uses enhanced ones.
This is a limit that should be reviewed. MAC could want to
use the enhanced structure.
- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
- XGMAC controller is not supported.
- Review the timer optimisation code to use an embedded device that seems to be
available in new chip generations.

View file

@ -166,9 +166,8 @@ F: drivers/serial/8250*
F: include/linux/serial_8250.h
8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
M: Paul Gortmaker <p_gortmaker@yahoo.com>
L: netdev@vger.kernel.org
S: Maintained
S: Orphan / Obsolete
F: drivers/net/*8390*
F: drivers/net/ax88796.c
@ -1095,6 +1094,12 @@ S: Supported
F: Documentation/aoe/
F: drivers/block/aoe/
ATHEROS ATH GENERIC UTILITIES
M: "Luis R. Rodriguez" <lrodriguez@atheros.com>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/net/wireless/ath/*
ATHEROS ATH5K WIRELESS DRIVER
M: Jiri Slaby <jirislaby@gmail.com>
M: Nick Kossifidis <mickflemm@gmail.com>
@ -1273,6 +1278,15 @@ S: Maintained
F: drivers/video/backlight/
F: include/linux/backlight.h
BATMAN ADVANCED
M: Marek Lindner <lindner_marek@yahoo.de>
M: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
M: Sven Eckelmann <sven@narfation.org>
L: b.a.t.m.a.n@lists.open-mesh.org
W: http://www.open-mesh.org/
S: Maintained
F: net/batman-adv/
BAYCOM/HDLCDRV DRIVERS FOR AX.25
M: Thomas Sailer <t.sailer@alumni.ethz.ch>
L: linux-hams@vger.kernel.org
@ -3139,6 +3153,8 @@ M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://e1000.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git
S: Supported
F: Documentation/networking/e100.txt
F: Documentation/networking/e1000.txt
@ -5056,7 +5072,7 @@ L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180*
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
@ -5066,7 +5082,17 @@ L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/rtl818x/rtl8187*
F: drivers/net/wireless/rtl818x/rtl8187/
RTL8192CE WIRELESS DRIVER
M: Larry Finger <Larry.Finger@lwfinger.net>
M: Chaoming Li <chaoming_li@realsil.com.cn>
L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/rtlwifi/
F: drivers/net/wireless/rtlwifi/rtl8192ce/
S3 SAVAGE FRAMEBUFFER DRIVER
M: Antonino Daplas <adaplas@gmail.com>

View file

@ -642,31 +642,13 @@ static void __init omap3pandora_init_irq(void)
omap_gpio_init();
}
static void pandora_wl1251_set_power(bool enable)
{
/*
* Keep power always on until wl1251_sdio driver learns to re-init
* the chip after powering it down and back up.
*/
}
static struct wl12xx_platform_data pandora_wl1251_pdata = {
.set_power = pandora_wl1251_set_power,
.use_eeprom = true,
};
static struct platform_device pandora_wl1251_data = {
.name = "wl1251_data",
.id = -1,
.dev = {
.platform_data = &pandora_wl1251_pdata,
},
};
static void pandora_wl1251_init(void)
static void __init pandora_wl1251_init(void)
{
struct wl12xx_platform_data pandora_wl1251_pdata;
int ret;
memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq");
if (ret < 0)
goto fail;
@ -679,6 +661,11 @@ static void pandora_wl1251_init(void)
if (pandora_wl1251_pdata.irq < 0)
goto fail_irq;
pandora_wl1251_pdata.use_eeprom = true;
ret = wl12xx_set_platform_data(&pandora_wl1251_pdata);
if (ret < 0)
goto fail_irq;
return;
fail_irq:
@ -691,7 +678,6 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
&pandora_leds_gpio,
&pandora_keys_gpio,
&pandora_dss_device,
&pandora_wl1251_data,
&pandora_vwlan_device,
};

View file

@ -28,39 +28,70 @@ struct qeth_arp_cache_entry {
__u8 reserved2[32];
} __attribute__ ((packed));
enum qeth_arp_ipaddrtype {
QETHARP_IP_ADDR_V4 = 1,
QETHARP_IP_ADDR_V6 = 2,
};
struct qeth_arp_entrytype {
__u8 mac;
__u8 ip;
} __attribute__((packed));
#define QETH_QARP_MEDIASPECIFIC_BYTES 32
#define QETH_QARP_MACADDRTYPE_BYTES 1
struct qeth_arp_qi_entry7 {
__u8 media_specific[32];
__u8 macaddr_type;
__u8 ipaddr_type;
__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
struct qeth_arp_entrytype type;
__u8 macaddr[6];
__u8 ipaddr[4];
} __attribute__((packed));
struct qeth_arp_qi_entry7_ipv6 {
__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
struct qeth_arp_entrytype type;
__u8 macaddr[6];
__u8 ipaddr[16];
} __attribute__((packed));
struct qeth_arp_qi_entry7_short {
__u8 macaddr_type;
__u8 ipaddr_type;
struct qeth_arp_entrytype type;
__u8 macaddr[6];
__u8 ipaddr[4];
} __attribute__((packed));
struct qeth_arp_qi_entry7_short_ipv6 {
struct qeth_arp_entrytype type;
__u8 macaddr[6];
__u8 ipaddr[16];
} __attribute__((packed));
struct qeth_arp_qi_entry5 {
__u8 media_specific[32];
__u8 macaddr_type;
__u8 ipaddr_type;
__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
struct qeth_arp_entrytype type;
__u8 ipaddr[4];
} __attribute__((packed));
struct qeth_arp_qi_entry5_ipv6 {
__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
struct qeth_arp_entrytype type;
__u8 ipaddr[16];
} __attribute__((packed));
struct qeth_arp_qi_entry5_short {
__u8 macaddr_type;
__u8 ipaddr_type;
struct qeth_arp_entrytype type;
__u8 ipaddr[4];
} __attribute__((packed));
struct qeth_arp_qi_entry5_short_ipv6 {
struct qeth_arp_entrytype type;
__u8 ipaddr[16];
} __attribute__((packed));
/*
* can be set by user if no "media specific information" is wanted
* -> saves a lot of space in user space buffer
*/
#define QETH_QARP_STRIP_ENTRIES 0x8000
#define QETH_QARP_WITH_IPV6 0x4000
#define QETH_QARP_REQUEST_MASK 0x00ff
/* data sent to user space as result of query arp ioctl */

View file

@ -92,7 +92,7 @@
#define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
#define FORE200E_NEXT_ENTRY(index, modulo) (index = ++(index) % (modulo))
#define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
#if 1
#define ASSERT(expr) if (!(expr)) { \

View file

@ -2241,11 +2241,8 @@ static int __devinit lanai_dev_open(struct atm_dev *atmdev)
memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
lanai_timed_poll_start(lanai);
printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
"(%02X-%02X-%02X-%02X-%02X-%02X)\n", lanai->number,
(int) lanai->pci->revision, (unsigned long) lanai->base,
lanai->pci->irq,
atmdev->esi[0], atmdev->esi[1], atmdev->esi[2],
atmdev->esi[3], atmdev->esi[4], atmdev->esi[5]);
"(%pMF)\n", lanai->number, (int) lanai->pci->revision,
(unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
"board_rev=%d\n", lanai->number,
lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,

View file

@ -297,8 +297,8 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
struct sk_buff *skb;
struct net_device *ifp;
read_lock(&dev_base_lock);
for_each_netdev(&init_net, ifp) {
rcu_read_lock();
for_each_netdev_rcu(&init_net, ifp) {
dev_hold(ifp);
if (!is_aoe_netif(ifp))
goto cont;
@ -325,7 +325,7 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
cont:
dev_put(ifp);
}
read_unlock(&dev_base_lock);
rcu_read_unlock();
}
static void

View file

@ -130,8 +130,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
read_lock(&dev_base_lock);
for_each_netdev(&init_net, dev) {
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
if (ipv6_chk_addr(&init_net,
&((struct sockaddr_in6 *) addr)->sin6_addr,
dev, 1)) {
@ -139,7 +139,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
break;
}
}
read_unlock(&dev_base_lock);
rcu_read_unlock();
break;
#endif
}
@ -200,7 +200,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = rt->rt_src;
if (rt->idev->dev->flags & IFF_LOOPBACK) {
if (rt->dst.dev->flags & IFF_LOOPBACK) {
ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
if (!ret)
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
@ -208,12 +208,12 @@ static int addr4_resolve(struct sockaddr_in *src_in,
}
/* If the device does ARP internally, return 'done' */
if (rt->idev->dev->flags & IFF_NOARP) {
rdma_copy_addr(addr, rt->idev->dev, NULL);
if (rt->dst.dev->flags & IFF_NOARP) {
rdma_copy_addr(addr, rt->dst.dev, NULL);
goto put;
}
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
if (!neigh || !(neigh->nud_state & NUD_VALID)) {
neigh_event_send(rt->dst.neighbour, NULL);
ret = -ENODATA;

View file

@ -848,8 +848,8 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
goto out;
}
read_lock(&dev_base_lock);
for_each_netdev(&init_net, tmp) {
rcu_read_lock();
for_each_netdev_rcu(&init_net, tmp) {
if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
vid = rdma_vlan_dev_vlan_id(tmp);
@ -884,7 +884,7 @@ static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
}
}
}
read_unlock(&dev_base_lock);
rcu_read_unlock();
for (i = 0; i < 128; ++i)
if (!hits[i]) {

View file

@ -420,7 +420,7 @@ enable_hwirq(struct inf_hw *hw)
break;
case INF_NICCY:
val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
val |= NICCY_IRQ_ENABLE;;
val |= NICCY_IRQ_ENABLE;
outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
break;
case INF_SCT_1:
@ -924,7 +924,7 @@ setup_instance(struct inf_hw *card)
mISDNipac_init(&card->ipac, card);
if (card->ipac.isac.dch.dev.Bprotocols == 0)
goto error_setup;;
goto error_setup;
err = mISDN_register_device(&card->ipac.isac.dch.dev,
&card->pdev->dev, card->name);

View file

@ -264,7 +264,7 @@ load_firmware(struct isar_hw *isar, const u8 *buf, int size)
while (noc) {
val = le16_to_cpu(*sp++);
*mp++ = val >> 8;
*mp++ = val & 0xFF;;
*mp++ = val & 0xFF;
noc--;
}
spin_lock_irqsave(isar->hwlock, flags);

View file

@ -314,7 +314,7 @@ hdlc_fill_fifo(struct BCState *bcs)
bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME;
}
if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
debugl1(cs, "hdlc_fill_fifo %d/%ld", count, bcs->tx_skb->len);
debugl1(cs, "hdlc_fill_fifo %d/%u", count, bcs->tx_skb->len);
p = bcs->tx_skb->data;
ptr = (u_int *)p;
skb_pull(bcs->tx_skb, count);

View file

@ -65,7 +65,7 @@ hisax_findcard(int driverid)
return (struct IsdnCardState *) 0;
}
static void
static __attribute__((format(printf, 3, 4))) void
link_debug(struct Channel *chanp, int direction, char *fmt, ...)
{
va_list args;
@ -1068,7 +1068,7 @@ init_d_st(struct Channel *chanp)
return 0;
}
static void
static __attribute__((format(printf, 2, 3))) void
callc_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;

View file

@ -1917,7 +1917,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if)
#ifdef CONFIG_PCI
#include <linux/pci.h>
static struct pci_device_id hisax_pci_tbl[] __devinitdata = {
static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = {
#ifdef CONFIG_HISAX_FRITZPCI
{PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) },
#endif

View file

@ -292,7 +292,7 @@ hfc_fill_fifo(struct BCState *bcs)
}
count = GetFreeFifoBytes_B(bcs);
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "hfc_fill_fifo %d count(%ld/%d),%lx",
debugl1(cs, "hfc_fill_fifo %d count(%u/%d),%lx",
bcs->channel, bcs->tx_skb->len,
count, current->state);
if (count < bcs->tx_skb->len) {
@ -719,7 +719,7 @@ hfc_fill_dfifo(struct IsdnCardState *cs)
}
count = GetFreeFifoBytes_D(cs);
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "hfc_fill_Dfifo count(%ld/%d)",
debugl1(cs, "hfc_fill_Dfifo count(%u/%d)",
cs->tx_skb->len, count);
if (count < cs->tx_skb->len) {
if (cs->debug & L1_DEB_ISAC)

View file

@ -282,7 +282,7 @@ hfc_fill_fifo(struct BCState *bcs)
count += cs->hw.hfc.fifosize;
} /* L1_MODE_TRANS */
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "hfc_fill_fifo %d count(%ld/%d)",
debugl1(cs, "hfc_fill_fifo %d count(%u/%d)",
bcs->channel, bcs->tx_skb->len,
count);
if (count < bcs->tx_skb->len) {

View file

@ -550,7 +550,7 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
count += D_FIFO_SIZE; /* count now contains available bytes */
if (cs->debug & L1_DEB_ISAC)
debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
cs->tx_skb->len, count);
if (count < cs->tx_skb->len) {
if (cs->debug & L1_DEB_ISAC)
@ -681,7 +681,7 @@ hfcpci_fill_fifo(struct BCState *bcs)
count += B_FIFO_SIZE; /* count now contains available bytes */
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
bcs->channel, bcs->tx_skb->len,
count, current->state);

View file

@ -179,7 +179,7 @@ write_fifo(struct IsdnCardState *cs, struct sk_buff *skb, u_char fifo, int trans
count += fifo_size; /* count now contains available bytes */
if (cs->debug & L1_DEB_ISAC_FIFO)
debugl1(cs, "hfcsx_write_fifo %d count(%ld/%d)",
debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)",
fifo, skb->len, count);
if (count < skb->len) {
if (cs->debug & L1_DEB_ISAC_FIFO)
@ -265,7 +265,7 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
count++;
if (cs->debug & L1_DEB_ISAC_FIFO)
debugl1(cs, "hfcsx_read_fifo %d count %ld)",
debugl1(cs, "hfcsx_read_fifo %d count %u)",
fifo, count);
if ((count > fifo_size) || (count < 4)) {
@ -986,7 +986,7 @@ HFCSX_l1hw(struct PStack *st, int pr, void *arg)
default:
spin_unlock_irqrestore(&cs->lock, flags);
if (cs->debug & L1_DEB_WARN)
debugl1(cs, "hfcsx_l1hw loop invalid %4lx", arg);
debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg);
return;
}
cs->hw.hfcsx.trm |= 0x80; /* enable IOM-loop */

View file

@ -1286,7 +1286,9 @@ int jiftime(char *s, long mark);
int HiSax_command(isdn_ctrl * ic);
int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
__attribute__((format(printf, 3, 4)))
void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
__attribute__((format(printf, 3, 0)))
void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
void HiSax_reportcard(int cardnr, int sel);
int QuickHex(char *txt, u_char * p, int cnt);

View file

@ -717,7 +717,7 @@ bch_mode(struct BCState *bcs, int mode, int bc)
bc = bc ? 1 : 0; // in case bc is greater than 1
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "mode_bch() switch B-% mode %d chan %d", hscx, mode, bc);
debugl1(cs, "mode_bch() switch B-%d mode %d chan %d", hscx, mode, bc);
bcs->mode = mode;
bcs->channel = bc;

View file

@ -189,7 +189,7 @@ ISARVersion(struct IsdnCardState *cs, char *s)
static int
isar_load_firmware(struct IsdnCardState *cs, u_char __user *buf)
{
int ret, size, cnt, debug;
int cfu_ret, ret, size, cnt, debug;
u_char len, nom, noc;
u_short sadr, left, *sp;
u_char __user *p = buf;
@ -212,9 +212,10 @@ isar_load_firmware(struct IsdnCardState *cs, u_char __user *buf)
cs->debug &= ~(L1_DEB_HSCX | L1_DEB_HSCX_FIFO);
#endif
if ((ret = copy_from_user(&size, p, sizeof(int)))) {
printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", ret);
return ret;
cfu_ret = copy_from_user(&size, p, sizeof(int));
if (cfu_ret) {
printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", cfu_ret);
return -EFAULT;
}
p += sizeof(int);
printk(KERN_DEBUG"isar_load_firmware size: %d\n", size);
@ -953,7 +954,7 @@ isar_pump_statev_modem(struct BCState *bcs, u_char devt) {
break;
case PSEV_GSTN_CLR:
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "pump stev GSTN CLEAR", devt);
debugl1(cs, "pump stev GSTN CLEAR");
break;
default:
if (cs->debug & L1_DEB_HSCX)
@ -1268,7 +1269,7 @@ isar_int_main(struct IsdnCardState *cs)
static void
ftimer_handler(struct BCState *bcs) {
if (bcs->cs->debug)
debugl1(bcs->cs, "ftimer flags %04x",
debugl1(bcs->cs, "ftimer flags %04lx",
bcs->Flag);
test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag);
if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) {
@ -1748,7 +1749,7 @@ isar_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
struct BCState *bcs;
if (cs->debug & L1_DEB_HSCX)
debugl1(cs, "isar_auxcmd cmd/ch %x/%d", ic->command, ic->arg);
debugl1(cs, "isar_auxcmd cmd/ch %x/%ld", ic->command, ic->arg);
switch (ic->command) {
case (ISDN_CMD_FAXCMD):
bcs = cs->channel[ic->arg].bcs;

View file

@ -21,6 +21,7 @@
#define B_XMTBUFREADY 1
#define B_ACKPENDING 2
__attribute__((format(printf, 2, 3)))
void debugl1(struct IsdnCardState *cs, char *fmt, ...);
void DChannel_proc_xmt(struct IsdnCardState *cs);
void DChannel_proc_rcv(struct IsdnCardState *cs);

View file

@ -66,7 +66,7 @@ static char *strL3Event[] =
"EV_TIMEOUT",
};
static void
static __attribute__((format(printf, 2, 3))) void
l3m_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;

View file

@ -254,7 +254,7 @@ static int make_raw_data(struct BCState *bcs) {
val >>= 1;
}
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs,"tiger make_raw: in %ld out %d.%d",
debugl1(bcs->cs,"tiger make_raw: in %u out %d.%d",
bcs->tx_skb->len, s_cnt, bitcnt);
if (bitcnt) {
while (8>bitcnt++) {
@ -361,7 +361,7 @@ static int make_raw_data_56k(struct BCState *bcs) {
val >>= 1;
}
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs,"tiger make_raw_56k: in %ld out %d.%d",
debugl1(bcs->cs,"tiger make_raw_56k: in %u out %d.%d",
bcs->tx_skb->len, s_cnt, bitcnt);
if (bitcnt) {
while (8>bitcnt++) {
@ -612,7 +612,7 @@ void netjet_fill_dma(struct BCState *bcs)
if (!bcs->tx_skb)
return;
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs,"tiger fill_dma1: c%d %4x", bcs->channel,
debugl1(bcs->cs,"tiger fill_dma1: c%d %4lx", bcs->channel,
bcs->Flag);
if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag))
return;
@ -625,7 +625,7 @@ void netjet_fill_dma(struct BCState *bcs)
return;
};
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs,"tiger fill_dma2: c%d %4x", bcs->channel,
debugl1(bcs->cs,"tiger fill_dma2: c%d %4lx", bcs->channel,
bcs->Flag);
if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
@ -667,7 +667,7 @@ void netjet_fill_dma(struct BCState *bcs)
write_raw(bcs, p, cnt);
}
if (bcs->cs->debug & L1_DEB_HSCX)
debugl1(bcs->cs,"tiger fill_dma3: c%d %4x", bcs->channel,
debugl1(bcs->cs,"tiger fill_dma3: c%d %4lx", bcs->channel,
bcs->Flag);
}

View file

@ -167,7 +167,8 @@ static struct FsmNode L1FnList[] __initdata =
{ST_L1_F8, EV_IND_RSY, l1_ignore},
};
static void l1m_debug(struct FsmInst *fi, char *fmt, ...)
static __attribute__((format(printf, 2, 3)))
void l1m_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
char buf[256];
@ -269,7 +270,8 @@ static char *strDoutEvent[] =
"EV_DOUT_UNDERRUN",
};
static void dout_debug(struct FsmInst *fi, char *fmt, ...)
static __attribute__((format(printf, 2, 3)))
void dout_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
char buf[256];

View file

@ -61,7 +61,7 @@ static int isdn_concap_dl_data_req(struct concap_proto *concap, struct sk_buff *
static int isdn_concap_dl_connect_req(struct concap_proto *concap)
{
struct net_device *ndev = concap -> net_dev;
isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
isdn_net_local *lp = netdev_priv(ndev);
int ret;
IX25DEBUG( "isdn_concap_dl_connect_req: %s \n", ndev -> name);

View file

@ -827,7 +827,7 @@ isdn_net_dial(void)
void
isdn_net_hangup(struct net_device *d)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(d);
isdn_net_local *lp = netdev_priv(d);
isdn_ctrl cmd;
#ifdef CONFIG_ISDN_X25
struct concap_proto *cprot = lp->netdev->cprot;
@ -1052,7 +1052,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
{
isdn_net_dev *nd;
isdn_net_local *slp;
isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
isdn_net_local *lp = netdev_priv(ndev);
int retv = NETDEV_TX_OK;
if (((isdn_net_local *) netdev_priv(ndev))->master) {
@ -1116,7 +1116,7 @@ isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
static void
isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
isdn_net_local *lp = netdev_priv(dev);
if (!skb)
return;
if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
@ -1131,7 +1131,7 @@ isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
static void isdn_net_tx_timeout(struct net_device * ndev)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
isdn_net_local *lp = netdev_priv(ndev);
printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate);
if (!lp->dialstate){
@ -1165,7 +1165,7 @@ static void isdn_net_tx_timeout(struct net_device * ndev)
static netdev_tx_t
isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
isdn_net_local *lp = netdev_priv(ndev);
#ifdef CONFIG_ISDN_X25
struct concap_proto * cprot = lp -> netdev -> cprot;
/* At this point hard_start_xmit() passes control to the encapsulation
@ -1347,7 +1347,7 @@ isdn_net_close(struct net_device *dev)
static struct net_device_stats *
isdn_net_get_stats(struct net_device *dev)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
isdn_net_local *lp = netdev_priv(dev);
return &lp->stats;
}
@ -1426,7 +1426,7 @@ isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
static int
isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
isdn_net_local *lp = netdev_priv(dev);
unsigned long len = 0;
unsigned long expires = 0;
int tmp = 0;
@ -1493,7 +1493,7 @@ isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static int isdn_net_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
isdn_net_local *lp = netdev_priv(dev);
switch (lp->p_encap) {
#ifdef CONFIG_ISDN_PPP
@ -1786,7 +1786,7 @@ isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
static void
isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
{
isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
isdn_net_local *lp = netdev_priv(ndev);
isdn_net_local *olp = lp; /* original 'lp' */
#ifdef CONFIG_ISDN_X25
struct concap_proto *cprot = lp -> netdev -> cprot;
@ -1800,7 +1800,7 @@ isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
* handle master's statistics and hangup-timeout
*/
ndev = lp->master;
lp = (isdn_net_local *) netdev_priv(ndev);
lp = netdev_priv(ndev);
lp->stats.rx_packets++;
lp->stats.rx_bytes += skb->len;
}

View file

@ -1147,15 +1147,14 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
}
if (is->pass_filter
&& sk_run_filter(skb, is->pass_filter, is->pass_len) == 0) {
&& sk_run_filter(skb, is->pass_filter) == 0) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
kfree_skb(skb);
return;
}
if (!(is->active_filter
&& sk_run_filter(skb, is->active_filter,
is->active_len) == 0)) {
&& sk_run_filter(skb, is->active_filter) == 0)) {
if (is->debug & 0x2)
printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
lp->huptimer = 0;
@ -1221,7 +1220,7 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
struct ippp_struct *ipt,*ipts;
int slot, retval = NETDEV_TX_OK;
mlp = (isdn_net_local *) netdev_priv(netdev);
mlp = netdev_priv(netdev);
nd = mlp->netdev; /* get master lp */
slot = mlp->ppp_slot;
@ -1294,15 +1293,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
}
if (ipt->pass_filter
&& sk_run_filter(skb, ipt->pass_filter, ipt->pass_len) == 0) {
&& sk_run_filter(skb, ipt->pass_filter) == 0) {
if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
kfree_skb(skb);
goto unlock;
}
if (!(ipt->active_filter
&& sk_run_filter(skb, ipt->active_filter,
ipt->active_len) == 0)) {
&& sk_run_filter(skb, ipt->active_filter) == 0)) {
if (ipt->debug & 0x4)
printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
lp->huptimer = 0;
@ -1492,9 +1490,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
}
drop |= is->pass_filter
&& sk_run_filter(skb, is->pass_filter, is->pass_len) == 0;
&& sk_run_filter(skb, is->pass_filter) == 0;
drop |= is->active_filter
&& sk_run_filter(skb, is->active_filter, is->active_len) == 0;
&& sk_run_filter(skb, is->active_filter) == 0;
skb_push(skb, IPPP_MAX_HEADER - 4);
return drop;
@ -1985,7 +1983,7 @@ isdn_ppp_dev_ioctl_stats(int slot, struct ifreq *ifr, struct net_device *dev)
{
struct ppp_stats __user *res = ifr->ifr_data;
struct ppp_stats t;
isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
isdn_net_local *lp = netdev_priv(dev);
if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
return -EFAULT;
@ -2024,7 +2022,7 @@ isdn_ppp_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int error=0;
int len;
isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
isdn_net_local *lp = netdev_priv(dev);
if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
@ -2091,7 +2089,7 @@ isdn_ppp_dial_slave(char *name)
sdev = lp->slave;
while (sdev) {
isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev);
isdn_net_local *mlp = netdev_priv(sdev);
if (!(mlp->flags & ISDN_NET_CONNECTED))
break;
sdev = mlp->slave;
@ -2099,7 +2097,7 @@ isdn_ppp_dial_slave(char *name)
if (!sdev)
return 2;
isdn_net_dial_req((isdn_net_local *) netdev_priv(sdev));
isdn_net_dial_req(netdev_priv(sdev));
return 0;
#else
return -1;
@ -2122,7 +2120,7 @@ isdn_ppp_hangup_slave(char *name)
sdev = lp->slave;
while (sdev) {
isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev);
isdn_net_local *mlp = netdev_priv(sdev);
if (mlp->slave) { /* find last connected link in chain */
isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);

View file

@ -99,12 +99,16 @@ static void
l1m_debug(struct FsmInst *fi, char *fmt, ...)
{
struct layer1 *l1 = fi->userdata;
struct va_format vaf;
va_list va;
va_start(va, fmt);
printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev));
vprintk(fmt, va);
printk("\n");
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf);
va_end(va);
}

View file

@ -95,14 +95,20 @@ static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
struct layer2 *l2 = fi->userdata;
struct va_format vaf;
va_list va;
if (!(*debug & DEBUG_L2_FSM))
return;
va_start(va, fmt);
printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei);
vprintk(fmt, va);
printk("\n");
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
l2->sapi, l2->tei, &vaf);
va_end(va);
}

View file

@ -79,14 +79,19 @@ static void
da_debug(struct FsmInst *fi, char *fmt, ...)
{
struct manager *mgr = fi->userdata;
struct va_format vaf;
va_list va;
if (!(*debug & DEBUG_L2_TEIFSM))
return;
va_start(va, fmt);
printk(KERN_DEBUG "mgr(%d): ", mgr->ch.st->dev->id);
vprintk(fmt, va);
printk("\n");
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "mgr(%d): %pV\n", mgr->ch.st->dev->id, &vaf);
va_end(va);
}
@ -223,14 +228,20 @@ static void
tei_debug(struct FsmInst *fi, char *fmt, ...)
{
struct teimgr *tm = fi->userdata;
struct va_format vaf;
va_list va;
if (!(*debug & DEBUG_L2_TEIFSM))
return;
va_start(va, fmt);
printk(KERN_DEBUG "sapi(%d) tei(%d): ", tm->l2->sapi, tm->l2->tei);
vprintk(fmt, va);
printk("\n");
vaf.fmt = fmt;
vaf.va = &va;
printk(KERN_DEBUG "sapi(%d) tei(%d): %pV\n",
tm->l2->sapi, tm->l2->tei, &vaf);
va_end(va);
}

View file

@ -158,8 +158,8 @@ static int mem_start;
struct net_device * __init el1_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
static unsigned ports[] = { 0x280, 0x300, 0};
unsigned *port;
static const unsigned ports[] = { 0x280, 0x300, 0};
const unsigned *port;
int err = 0;
if (!dev)

View file

@ -392,8 +392,8 @@ el2_open(struct net_device *dev)
int retval;
if (dev->irq < 2) {
int irqlist[] = {5, 9, 3, 4, 0};
int *irqp = irqlist;
static const int irqlist[] = {5, 9, 3, 4, 0};
const int *irqp = irqlist;
outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
do {

View file

@ -201,7 +201,7 @@ struct net_local {
#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
#define RX_BUF_END (dev->mem_end - dev->mem_start)
#define TX_TIMEOUT 5
#define TX_TIMEOUT (HZ/20)
/*
That's it: only 86 bytes to set up the beast, including every extra
@ -311,8 +311,8 @@ static int mem_start;
struct net_device * __init el16_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
static unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
unsigned *port;
static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
const unsigned *port;
int err = -ENODEV;
if (!dev)

View file

@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
#define WAIT_TX_AVAIL 200
/* Operational parameter that usually are not changed. */
#define TX_TIMEOUT 40 /* Time in jiffies before concluding Tx hung */
#define TX_TIMEOUT ((4*HZ)/10) /* Time in jiffies before concluding Tx hung */
/* The size here is somewhat misleading: the Corkscrew also uses the ISA
aliased registers at <base>+0x400.

View file

@ -317,13 +317,13 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
u8 POS;
u32 base;
struct mc32_local *lp = netdev_priv(dev);
static u16 mca_io_bases[]={
static const u16 mca_io_bases[] = {
0x7280,0x7290,
0x7680,0x7690,
0x7A80,0x7A90,
0x7E80,0x7E90
};
static u32 mca_mem_bases[]={
static const u32 mca_mem_bases[] = {
0x00C0000,
0x00C4000,
0x00C8000,
@ -333,7 +333,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
0x00D8000,
0x00DC000
};
static char *failures[]={
static const char * const failures[] = {
"Processor instruction",
"Processor data bus",
"Processor data bus",

View file

@ -1092,10 +1092,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct rtl8139_private *tp = netdev_priv(dev);
assert (dev != NULL);
flush_scheduled_work();
cancel_delayed_work_sync(&tp->thread);
unregister_netdev (dev);

View file

@ -191,7 +191,7 @@ enum commands {
#define RX_SUSPEND 0x0030
#define RX_ABORT 0x0040
#define TX_TIMEOUT 5
#define TX_TIMEOUT (HZ/20)
struct i596_reg {

View file

@ -1533,7 +1533,7 @@ config E100
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
to identify the adapter.
For the latest Intel PRO/100 network driver for Linux, see:
@ -1786,17 +1786,17 @@ config KS8842
tristate "Micrel KSZ8841/42 with generic bus interface"
depends on HAS_IOMEM && DMA_ENGINE
help
This platform driver is for KSZ8841(1-port) / KS8842(2-port)
ethernet switch chip (managed, VLAN, QoS) from Micrel or
Timberdale(FPGA).
config KS8851
tristate "Micrel KS8851 SPI"
depends on SPI
select MII
tristate "Micrel KS8851 SPI"
depends on SPI
select MII
select CRC32
help
SPI driver for Micrel KS8851 SPI attached network chip.
help
SPI driver for Micrel KS8851 SPI attached network chip.
config KS8851_MLL
tristate "Micrel KS8851 MLL"
@ -2133,25 +2133,25 @@ config IP1000
will be called ipg. This is recommended.
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called igb.
To compile this driver as a module, choose M here. The module
will be called igb.
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
@ -2163,25 +2163,25 @@ config IGB_DCA
is used, with the intent of lessening the impact of cache misses.
config IGBVF
tristate "Intel(R) 82576 Virtual Function Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
tristate "Intel(R) 82576 Virtual Function Ethernet support"
depends on PCI
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
<http://support.intel.com/support/network/adapter/pro100/21397.htm>
For general information and support, go to the Intel support
website at:
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
To compile this driver as a module, choose M here. The module
will be called igbvf.
source "drivers/net/ixp2000/Kconfig"
@ -2233,6 +2233,7 @@ config YELLOWFIN
config R8169
tristate "Realtek 8169 gigabit ethernet support"
depends on PCI
select FW_LOADER
select CRC32
select MII
---help---
@ -2300,14 +2301,14 @@ config SKGE
will be called skge. This is recommended.
config SKGE_DEBUG
bool "Debugging interface"
depends on SKGE && DEBUG_FS
help
This option adds the ability to dump driver state for debugging.
The file /sys/kernel/debug/skge/ethX displays the state of the internal
transmit and receive rings.
bool "Debugging interface"
depends on SKGE && DEBUG_FS
help
This option adds the ability to dump driver state for debugging.
The file /sys/kernel/debug/skge/ethX displays the state of the internal
transmit and receive rings.
If unsure, say N.
If unsure, say N.
config SKY2
tristate "SysKonnect Yukon2 support"
@ -2326,14 +2327,14 @@ config SKY2
will be called sky2. This is recommended.
config SKY2_DEBUG
bool "Debugging interface"
depends on SKY2 && DEBUG_FS
help
This option adds the ability to dump driver state for debugging.
The file /sys/kernel/debug/sky2/ethX displays the state of the internal
transmit and receive rings.
bool "Debugging interface"
depends on SKY2 && DEBUG_FS
help
This option adds the ability to dump driver state for debugging.
The file /sys/kernel/debug/sky2/ethX displays the state of the internal
transmit and receive rings.
If unsure, say N.
If unsure, say N.
config VIA_VELOCITY
tristate "VIA Velocity support"
@ -2389,12 +2390,12 @@ config SPIDER_NET
Cell Processor-Based Blades from IBM.
config TSI108_ETH
tristate "Tundra TSI108 gigabit Ethernet support"
depends on TSI108_BRIDGE
help
This driver supports Tundra TSI108 gigabit Ethernet ports.
To compile this driver as a module, choose M here: the module
will be called tsi108_eth.
tristate "Tundra TSI108 gigabit Ethernet support"
depends on TSI108_BRIDGE
help
This driver supports Tundra TSI108 gigabit Ethernet ports.
To compile this driver as a module, choose M here: the module
will be called tsi108_eth.
config GELIC_NET
tristate "PS3 Gigabit Ethernet driver"
@ -2573,32 +2574,32 @@ config MDIO
tristate
config CHELSIO_T1
tristate "Chelsio 10Gb Ethernet support"
depends on PCI
tristate "Chelsio 10Gb Ethernet support"
depends on PCI
select CRC32
select MDIO
help
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
help
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
performance tuning is in <file:Documentation/networking/cxgb.txt>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.html>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.html>.
Please send feedback to <linux-bugs@chelsio.com>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module, choose M here: the module
will be called cxgb.
To compile this driver as a module, choose M here: the module
will be called cxgb.
config CHELSIO_T1_1G
bool "Chelsio gigabit Ethernet support"
depends on CHELSIO_T1
help
Enables support for Chelsio's gigabit Ethernet PCI cards. If you
are using only 10G cards say 'N' here.
bool "Chelsio gigabit Ethernet support"
depends on CHELSIO_T1
help
Enables support for Chelsio's gigabit Ethernet PCI cards. If you
are using only 10G cards say 'N' here.
config CHELSIO_T3_DEPENDS
tristate
@ -2728,26 +2729,26 @@ config IXGBE_DCB
If unsure, say N.
config IXGBEVF
tristate "Intel(R) 82599 Virtual Function Ethernet support"
depends on PCI_MSI
---help---
This driver supports Intel(R) 82599 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
tristate "Intel(R) 82599 Virtual Function Ethernet support"
depends on PCI_MSI
---help---
This driver supports Intel(R) 82599 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
<http://support.intel.com/support/network/sb/CS-008441.htm>
For general information and support, go to the Intel support
website at:
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
<http://support.intel.com>
More specific information on configuring the driver is in
<file:Documentation/networking/ixgbevf.txt>.
More specific information on configuring the driver is in
<file:Documentation/networking/ixgbevf.txt>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
config IXGB
tristate "Intel(R) PRO/10GbE support"
@ -2772,29 +2773,38 @@ config IXGB
will be called ixgb.
config S2IO
tristate "S2IO 10Gbe XFrame NIC"
tristate "Exar Xframe 10Gb Ethernet Adapter"
depends on PCI
---help---
This driver supports the 10Gbe XFrame NIC of S2IO.
This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
More specific information on configuring the driver is in
<file:Documentation/networking/s2io.txt>.
To compile this driver as a module, choose M here. The module
will be called s2io.
config VXGE
tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
depends on PCI && INET
---help---
This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
This driver supports Exar Corp's X3100 Series 10 GbE PCIe
I/O Virtualized Server Adapter.
More specific information on configuring the driver is in
<file:Documentation/networking/vxge.txt>.
To compile this driver as a module, choose M here. The module
will be called vxge.
config VXGE_DEBUG_TRACE_ALL
bool "Enabling All Debug trace statments in driver"
default n
depends on VXGE
---help---
Say Y here if you want to enable all the debug trace statements in
the vxge driver. By default only a few debug trace statements are
enabled.
config MYRI10GE
tristate "Myricom Myri-10G Ethernet support"
@ -2906,18 +2916,18 @@ config QLGE
will be called qlge.
config BNA
tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
depends on PCI
---help---
This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
cards.
To compile this driver as a module, choose M here: the module
will be called bna.
tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
depends on PCI
---help---
This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
cards.
To compile this driver as a module, choose M here: the module
will be called bna.
For general information and support, go to the Brocade support
website at:
For general information and support, go to the Brocade support
website at:
<http://support.brocade.com>
<http://support.brocade.com>
source "drivers/net/sfc/Kconfig"
@ -3239,18 +3249,18 @@ config PPP_BSDCOMP
modules once you have said "make modules". If unsure, say N.
config PPP_MPPE
tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
depends on PPP && EXPERIMENTAL
select CRYPTO
select CRYPTO_SHA1
select CRYPTO_ARC4
select CRYPTO_ECB
---help---
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
depends on PPP && EXPERIMENTAL
select CRYPTO
select CRYPTO_SHA1
select CRYPTO_ARC4
select CRYPTO_ECB
---help---
Support for the MPPE Encryption protocol, as employed by the
Microsoft Point-to-Point Tunneling Protocol.
See http://pptpclient.sourceforge.net/ for information on
configuring PPTP clients and servers to utilize this method.
See http://pptpclient.sourceforge.net/ for information on
configuring PPTP clients and servers to utilize this method.
config PPPOE
tristate "PPP over Ethernet (EXPERIMENTAL)"
@ -3409,14 +3419,14 @@ config VIRTIO_NET
depends on EXPERIMENTAL && VIRTIO
---help---
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && INET
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
endif # NETDEVICES

View file

@ -55,8 +55,6 @@ extern struct net_device *eth16i_probe(int unit);
extern struct net_device *i82596_probe(int unit);
extern struct net_device *ewrk3_probe(int unit);
extern struct net_device *el1_probe(int unit);
extern struct net_device *wavelan_probe(int unit);
extern struct net_device *arlan_probe(int unit);
extern struct net_device *el16_probe(int unit);
extern struct net_device *elmc_probe(int unit);
extern struct net_device *elplus_probe(int unit);
@ -68,7 +66,6 @@ extern struct net_device *ni5010_probe(int unit);
extern struct net_device *ni52_probe(int unit);
extern struct net_device *ni65_probe(int unit);
extern struct net_device *sonic_probe(int unit);
extern struct net_device *SK_init(int unit);
extern struct net_device *seeq8005_probe(int unit);
extern struct net_device *smc_init(int unit);
extern struct net_device *atarilance_probe(int unit);
@ -76,8 +73,6 @@ extern struct net_device *sun3lance_probe(int unit);
extern struct net_device *sun3_82586_probe(int unit);
extern struct net_device *apne_probe(int unit);
extern struct net_device *cs89x0_probe(int unit);
extern struct net_device *hplance_probe(int unit);
extern struct net_device *bagetlance_probe(int unit);
extern struct net_device *mvme147lance_probe(int unit);
extern struct net_device *tc515_probe(int unit);
extern struct net_device *lance_probe(int unit);

View file

@ -340,14 +340,6 @@ am79c961_close(struct net_device *dev)
return 0;
}
/*
* Get the current statistics.
*/
static struct net_device_stats *am79c961_getstats (struct net_device *dev)
{
return &dev->stats;
}
static void am79c961_mc_hash(char *addr, unsigned short *hash)
{
if (addr[0] & 0x01) {
@ -665,7 +657,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
.ndo_open = am79c961_open,
.ndo_stop = am79c961_close,
.ndo_start_xmit = am79c961_sendpacket,
.ndo_get_stats = am79c961_getstats,
.ndo_set_multicast_list = am79c961_setmulticastlist,
.ndo_tx_timeout = am79c961_timeout,
.ndo_validate_addr = eth_validate_addr,

View file

@ -1229,8 +1229,10 @@ static int __devinit eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if ((err = IS_ERR(port->phydev)))
if (IS_ERR(port->phydev)) {
err = PTR_ERR(port->phydev);
goto err_free_mem;
}
port->phydev->irq = PHY_POLL;

View file

@ -117,7 +117,7 @@
#define TX_DESC_SIZE 10
#define MAX_RBUFF_SZ 0x600
#define MAX_TBUFF_SZ 0x600
#define TX_TIMEOUT 50
#define TX_TIMEOUT (HZ/2)
#define DELAY 1000
#define CAM0 0x0

View file

@ -150,7 +150,7 @@ struct net_local {
#define PORT_OFFSET(o) (o)
#define TX_TIMEOUT 10
#define TX_TIMEOUT (HZ/10)
/* Index to functions, as function prototypes. */
@ -270,9 +270,9 @@ static const struct net_device_ops at1700_netdev_ops = {
static int __init at1700_probe1(struct net_device *dev, int ioaddr)
{
char fmv_irqmap[4] = {3, 7, 10, 15};
char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
static const char fmv_irqmap[4] = {3, 7, 10, 15};
static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
int slot, ret = -ENODEV;
struct net_local *lp = netdev_priv(dev);

View file

@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define TX_TIMEOUT 20
#define TX_TIMEOUT (HZ/5)
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {

View file

@ -2079,7 +2079,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
cso = skb_transport_offset(skb);
cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
if (netif_msg_tx_err(adapter))

View file

@ -1649,7 +1649,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
cso = skb_transport_offset(skb);
cso = skb_checksum_start_offset(skb);
if (unlikely(cso & 0x1)) {
netdev_err(adapter->netdev,
"payload offset should not ant event number\n");

View file

@ -2174,7 +2174,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
u8 css, cso;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
css = (u8) (skb->csum_start - skb_headroom(skb));
css = skb_checksum_start_offset(skb);
cso = css + (u8) skb->csum_offset;
if (unlikely(css & 0x1)) {
/* L1 hardware requires an even number here */

View file

@ -1504,8 +1504,8 @@ static void __devexit atl2_remove(struct pci_dev *pdev)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_config_timer);
flush_scheduled_work();
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->link_chg_task);
unregister_netdev(netdev);

View file

@ -106,8 +106,6 @@ MODULE_VERSION(DRV_VERSION);
* complete immediately.
*/
struct au1000_private *au_macs[NUM_ETH_INTERFACES];
/*
* board-specific configurations
*

View file

@ -325,7 +325,7 @@ static void ax_block_output(struct net_device *dev, int count,
static void
ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
{
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
struct ei_device *ei_local = netdev_priv(dev);
void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
unsigned int memr;
@ -364,7 +364,7 @@ ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
static unsigned int
ax_phy_ei_inbits(struct net_device *dev, int no)
{
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
struct ei_device *ei_local = netdev_priv(dev);
void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
unsigned int memr;
unsigned int result = 0;
@ -412,7 +412,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
static int
ax_phy_read(struct net_device *dev, int phy_addr, int reg)
{
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
struct ei_device *ei_local = netdev_priv(dev);
unsigned long flags;
unsigned int result;
@ -435,7 +435,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
static void
ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
{
struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
struct ei_device *ei = netdev_priv(dev);
struct ax_device *ax = to_ax_dev(dev);
unsigned long flags;

View file

@ -1097,7 +1097,7 @@ static int bcm_enet_stop(struct net_device *dev)
enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
/* make sure no mib update is scheduled */
flush_scheduled_work();
cancel_work_sync(&priv->mib_update_task);
/* disable dma & mac */
bcm_enet_disable_dma(priv, priv->tx_chan);

View file

@ -38,14 +38,17 @@
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define EMULEX_VENDOR_ID 0x10df
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700
#define OC_DEVICE_ID2 0x710
#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
static inline char *nic_name(struct pci_dev *pdev)
{
@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
case OC_DEVICE_ID1:
return OC_NAME;
case OC_DEVICE_ID2:
return OC_NAME1;
return OC_NAME_BE;
case OC_DEVICE_ID3:
return OC_NAME_LANCER;
case BE_DEVICE_ID2:
return BE3_NAME;
default:
@ -149,6 +154,7 @@ struct be_eq_obj {
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
u8 msix_vec_idx;
struct napi_struct napi;
};
@ -214,7 +220,9 @@ struct be_rx_obj {
struct be_rx_stats stats;
u8 rss_id;
bool rx_post_starved; /* Zero rx frags have been posted to BE */
u32 cache_line_barrier[16];
u16 last_frag_index;
u16 rsvd;
u32 cache_line_barrier[15];
};
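/* Note: the two new u16 fields occupy 4 bytes, so cache_line_barrier
 * shrinks from 16 to 15 u32s, presumably keeping the struct's size and
 * the cache-line placement of the surrounding members unchanged.
 */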
struct be_vf_cfg {
@ -260,6 +268,8 @@ struct be_adapter {
u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
u8 msix_vec_next_idx;
struct vlan_group *vlan_grp;
u16 vlans_added;
u16 max_vlans; /* Number of vlans supported */
@ -299,8 +309,8 @@ struct be_adapter {
bool sriov_enabled;
struct be_vf_cfg vf_cfg[BE_MAX_VF];
u8 base_eq_id;
u8 is_virtfn;
u32 sli_family;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
@ -309,6 +319,8 @@ struct be_adapter {
#define BE_GEN2 2
#define BE_GEN3 3
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
extern const struct ethtool_ops be_ethtool_ops;
#define tx_stats(adapter) (&adapter->tx_stats)
@ -416,10 +428,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
u8 data;
u32 sli_intf;
pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
pci_read_config_byte(adapter->pdev, 0xFE, &data);
adapter->is_virtfn = (data != 0xAA);
if (lancer_chip(adapter)) {
pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
&sli_intf);
adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
} else {
pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
pci_read_config_byte(adapter->pdev, 0xFE, &data);
adapter->is_virtfn = (data != 0xAA);
}
}
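/* VF detection now branches per chip: Lancer exposes a function-type
 * bit in the SLI_INTF register, while BE2/BE3 keep the scratch-byte
 * probe -- a PF reads back the 0xAA it wrote to config offset 0xFE,
 * a VF presumably does not.
 */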
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)

View file

@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
u32 sem;
if (lancer_chip(adapter))
sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
else
sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@ -685,16 +690,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (lancer_chip(adapter)) {
req->hdr.version = 1;
req->page_size = 1; /* 1 for 4K */
AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
no_delay);
AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
__ilog2_u32(cq->len/256));
AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
} else {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
__ilog2_u32(cq->len/256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, solevent,
ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
}
AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
__ilog2_u32(cq->len/256));
AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@ -743,13 +768,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (lancer_chip(adapter)) {
req->hdr.version = 1;
req->cq_id = cpu_to_le16(cq->id);
AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
ctxt, cq->id);
AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
ctxt, 1);
} else {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
}
AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
req->async_event_bitmap[0] |= 0x00000022;
req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
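/* 0x22 sets bits 1 and 5 as the comment says; replacing |= with a
 * straight assignment through cpu_to_le32() presumably fixes the
 * bitmap's byte order on big-endian hosts.
 */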
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

View file

@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
/******************** Create CQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
struct amap_cq_context {
struct amap_cq_context_be {
u8 cidx[11]; /* dword 0*/
u8 rsvd0; /* dword 0*/
u8 coalescwm[2]; /* dword 0*/
@ -332,14 +332,32 @@ struct amap_cq_context {
u8 rsvd5[32]; /* dword 3*/
} __packed;
struct amap_cq_context_lancer {
u8 rsvd0[12]; /* dword 0*/
u8 coalescwm[2]; /* dword 0*/
u8 nodelay; /* dword 0*/
u8 rsvd1[12]; /* dword 0*/
u8 count[2]; /* dword 0*/
u8 valid; /* dword 0*/
u8 rsvd2; /* dword 0*/
u8 eventable; /* dword 0*/
u8 eqid[16]; /* dword 1*/
u8 rsvd3[15]; /* dword 1*/
u8 armed; /* dword 1*/
u8 rsvd4[32]; /* dword 2*/
u8 rsvd5[32]; /* dword 3*/
} __packed;
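The "pseudo amap" convention above (one byte of the C struct per hardware bit) lets field offsets and widths fall out of ordinary C operators instead of hand-maintained tables. A stand-alone sketch of the idea, with hypothetical macro names and assuming no field straddles a 32-bit boundary (the driver's real AMAP_* helpers may differ):

#include <stddef.h>

#define AMAP_BIT_OFFSET(s, f)	offsetof(struct s, f)		/* bit position */
#define AMAP_BIT_WIDTH(s, f)	sizeof(((struct s *)0)->f)	/* width in bits, < 32 */
#define AMAP_DWORD(s, f)	(AMAP_BIT_OFFSET(s, f) / 32)	/* which 32-bit word */
#define AMAP_SHIFT(s, f)	(AMAP_BIT_OFFSET(s, f) % 32)	/* shift within it */
#define AMAP_MASK(s, f)		((1u << AMAP_BIT_WIDTH(s, f)) - 1)

/* so a setter can be written once, e.g.:
 *	ctxt[AMAP_DWORD(amap_cq_context_be, eqid)] |=
 *		(val & AMAP_MASK(amap_cq_context_be, eqid)) <<
 *		AMAP_SHIFT(amap_cq_context_be, eqid);
 */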
struct be_cmd_req_cq_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 rsvd0;
u8 context[sizeof(struct amap_cq_context) / 8];
u8 page_size;
u8 rsvd0;
u8 context[sizeof(struct amap_cq_context_be) / 8];
struct phys_addr pages[8];
} __packed;
struct be_cmd_resp_cq_create {
struct be_cmd_resp_hdr hdr;
u16 cq_id;
@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
/******************** Create MCCQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
struct amap_mcc_context {
struct amap_mcc_context_be {
u8 con_index[14];
u8 rsvd0[2];
u8 ring_size[4];
@ -364,12 +382,23 @@ struct amap_mcc_context {
u8 rsvd2[32];
} __packed;
struct amap_mcc_context_lancer {
u8 async_cq_id[16];
u8 ring_size[4];
u8 rsvd0[12];
u8 rsvd1[31];
u8 valid;
u8 async_cq_valid[1];
u8 rsvd2[31];
u8 rsvd3[32];
} __packed;
struct be_cmd_req_mcc_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
u16 rsvd0;
u16 cq_id;
u32 async_event_bitmap[1];
u8 context[sizeof(struct amap_mcc_context) / 8];
u8 context[sizeof(struct amap_mcc_context_be) / 8];
struct phys_addr pages[8];
} __packed;
@ -605,6 +634,7 @@ struct be_hw_stats {
struct be_rxf_stats rxf;
u32 rsvd[48];
struct be_erx_stats erx;
u32 rsvd1[6];
};
struct be_cmd_req_get_stats {

View file

@ -549,7 +549,9 @@ be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
static const u64 pattern[2] = {
0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,

View file

@ -32,10 +32,12 @@
#define MPU_EP_CONTROL 0
/********** MPU semaphore ******************/
#define MPU_EP_SEMAPHORE_OFFSET 0xac
#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
#define EP_SEMAPHORE_POST_ERR_MASK 0x1
#define EP_SEMAPHORE_POST_ERR_SHIFT 31
#define MPU_EP_SEMAPHORE_OFFSET 0xac
#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
#define EP_SEMAPHORE_POST_ERR_MASK 0x1
#define EP_SEMAPHORE_POST_ERR_SHIFT 31
/* MPU semaphore POST stage values */
#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
@ -66,6 +68,28 @@
#define PCICFG_UE_STATUS_LOW_MASK 0xA8
#define PCICFG_UE_STATUS_HI_MASK 0xAC
/******** SLI_INTF ***********************/
#define SLI_INTF_REG_OFFSET 0x58
#define SLI_INTF_VALID_MASK 0xE0000000
#define SLI_INTF_VALID 0xC0000000
#define SLI_INTF_HINT2_MASK 0x1F000000
#define SLI_INTF_HINT2_SHIFT 24
#define SLI_INTF_HINT1_MASK 0x00FF0000
#define SLI_INTF_HINT1_SHIFT 16
#define SLI_INTF_FAMILY_MASK 0x00000F00
#define SLI_INTF_FAMILY_SHIFT 8
#define SLI_INTF_IF_TYPE_MASK 0x0000F000
#define SLI_INTF_IF_TYPE_SHIFT 12
#define SLI_INTF_REV_MASK 0x000000F0
#define SLI_INTF_REV_SHIFT 4
#define SLI_INTF_FT_MASK 0x00000001
/* SLI family */
#define BE_SLI_FAMILY 0x0
#define LANCER_A0_SLI_FAMILY 0xA
/********* ISR0 Register offset **********/
#define CEV_ISR0_OFFSET 0xC18
#define CEV_ISR_SIZE 4
@ -73,6 +97,9 @@
/********* Event Q door bell *************/
#define DB_EQ_OFFSET DB_CQ_OFFSET
#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
@ -85,6 +112,10 @@
/********* Compl Q door bell *************/
#define DB_CQ_OFFSET 0x120
#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
placing at 11-15 */
/* Number of event entries processed */
#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */

View file

@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_err)
return;
@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_err)
return;
@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
/* to account for hdr wrb */
cnt++;
if (cnt & 1) {
if (lancer_chip(adapter) || !(cnt & 1)) {
*dummy = false;
} else {
/* add a dummy to make it an even num */
cnt++;
*dummy = true;
} else
*dummy = false;
}
BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
return cnt;
}
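Restating the counting logic above: one WRB for linear data when present, one per page fragment (the nr_frags addition sits in elided context), plus one header WRB; BE2/BE3 then pad an odd total with a dummy WRB, which Lancer no longer needs. A stand-alone model under those assumptions:

/* model only -- mirrors the logic above, not a driver function */
static unsigned int wrb_cnt_model(int has_linear_data, int nr_frags, int is_lancer)
{
	unsigned int cnt = has_linear_data + nr_frags + 1;	/* +1 hdr wrb */

	if (!is_lancer && (cnt & 1))
		cnt++;		/* BE2/BE3 need an even count: add a dummy */
	return cnt;
}

/* e.g. linear + 3 frags: 5 WRBs on Lancer, 6 (with dummy) on BE3. */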
@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
hdr, skb_shinfo(skb)->gso_size);
if (skb_is_gso_v6(skb))
if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
if (lancer_chip(adapter) && adapter->sli_family ==
LANCER_A0_SLI_FAMILY) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb,
tcpcs, hdr, 1);
else if (is_udp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb,
udpcs, hdr, 1);
}
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (is_tcp_pkt(skb))
AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 start = txq->head;
bool dummy_wrb, stopped = false;
wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
if (copied) {
@ -894,11 +911,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
for (i = 0; i < num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxo, rxq_idx);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
index_inc(&rxq_idx, rxq->len);
/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
rxo->last_frag_index = rxq_idx;
for (i = 0; i < num_rcvd; i++) {
page_info = get_rx_page_info(adapter, rxo, rxq_idx);
put_page(page_info->page);
memset(page_info, 0, sizeof(*page_info));
index_inc(&rxq_idx, rxq->len);
}
}
}
@ -999,9 +1022,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
u8 vtm;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
/* Is it a flush compl that has no data */
if (unlikely(num_rcvd == 0))
return;
skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
if (unlikely(!skb)) {
@ -1035,7 +1055,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
return;
}
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
vid = swab16(vid);
if (!lancer_chip(adapter))
vid = swab16(vid);
vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
} else {
netif_receive_skb(skb);
@ -1057,10 +1078,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
u8 pkt_type;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
/* Is it a flush compl that has no data */
if (unlikely(num_rcvd == 0))
return;
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@ -1113,7 +1130,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
napi_gro_frags(&eq_obj->napi);
} else {
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
vid = swab16(vid);
if (!lancer_chip(adapter))
vid = swab16(vid);
if (!adapter->vlan_grp || adapter->vlans_added == 0)
return;
@ -1330,7 +1348,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
be_rx_compl_discard(adapter, rxo, rxcp);
be_rx_compl_reset(rxcp);
be_cq_notify(adapter, rx_cq->id, true, 1);
be_cq_notify(adapter, rx_cq->id, false, 1);
}
/* Then free posted rx buffer that were not used */
@ -1381,7 +1399,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
sent_skb = sent_skbs[txq->tail];
end_idx = txq->tail;
index_adv(&end_idx,
wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
txq->len);
be_tx_compl_process(adapter, end_idx);
}
}
@ -1476,7 +1495,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
/* Ask BE to create Tx Event queue */
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
adapter->base_eq_id = adapter->tx_eq.q.id;
adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
/* Alloc TX eth compl queue */
cq = &adapter->tx_obj.cq;
@ -1554,6 +1575,9 @@ static int be_rx_queues_create(struct be_adapter *adapter)
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
rxo->adapter = adapter;
/* Init last_frag_index so that the frag index in the first
* completion will never match */
rxo->last_frag_index = 0xffff;
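/* 0xffff is a sentinel no real fragndx should take, so the first
 * completion always gets processed; afterwards a repeated index (or
 * numfrags == 0) marks the flush/out-of-buffer completions skipped
 * in be_rx_compl_discard() and be_poll_rx().
 */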
rxo->rx_eq.max_eqd = BE_MAX_EQD;
rxo->rx_eq.enable_aic = true;
@ -1568,6 +1592,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
if (rc)
goto err;
rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
/* CQ */
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@ -1578,7 +1604,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
if (rc)
goto err;
/* Rx Q */
q = &rxo->q;
rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@ -1611,29 +1636,45 @@ static int be_rx_queues_create(struct be_adapter *adapter)
return -1;
}
/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
static bool event_peek(struct be_eq_obj *eq_obj)
{
return eq_id - adapter->base_eq_id;
struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
if (!eqe->evt)
return false;
else
return true;
}
static irqreturn_t be_intx(int irq, void *dev)
{
struct be_adapter *adapter = dev;
struct be_rx_obj *rxo;
int isr, i;
int isr, i, tx = 0, rx = 0;
isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
(adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
if (!isr)
return IRQ_NONE;
if (lancer_chip(adapter)) {
if (event_peek(&adapter->tx_eq))
tx = event_handle(adapter, &adapter->tx_eq);
for_all_rx_queues(adapter, rxo, i) {
if (event_peek(&rxo->rx_eq))
rx |= event_handle(adapter, &rxo->rx_eq);
}
if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
event_handle(adapter, &adapter->tx_eq);
if (!(tx || rx))
return IRQ_NONE;
for_all_rx_queues(adapter, rxo, i) {
if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
event_handle(adapter, &rxo->rx_eq);
} else {
isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
if (!isr)
return IRQ_NONE;
if ((1 << adapter->tx_eq.msix_vec_idx & isr))
event_handle(adapter, &adapter->tx_eq);
for_all_rx_queues(adapter, rxo, i) {
if ((1 << rxo->rx_eq.msix_vec_idx & isr))
event_handle(adapter, &rxo->rx_eq);
}
}
return IRQ_HANDLED;
@ -1658,10 +1699,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
return IRQ_HANDLED;
}
static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
struct be_eth_rx_compl *rxcp)
static inline bool do_gro(struct be_rx_obj *rxo,
struct be_eth_rx_compl *rxcp, u8 err)
{
int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
if (err)
@ -1678,6 +1718,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
struct be_queue_info *rx_cq = &rxo->cq;
struct be_eth_rx_compl *rxcp;
u32 work_done;
u16 frag_index, num_rcvd;
u8 err;
rxo->stats.rx_polls++;
for (work_done = 0; work_done < budget; work_done++) {
@ -1685,10 +1727,22 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
if (!rxcp)
break;
if (do_gro(adapter, rxo, rxcp))
be_rx_compl_process_gro(adapter, rxo, rxcp);
else
be_rx_compl_process(adapter, rxo, rxcp);
err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
rxcp);
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
rxcp);
/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
if (likely(frag_index != rxo->last_frag_index &&
num_rcvd != 0)) {
rxo->last_frag_index = frag_index;
if (do_gro(rxo, rxcp, err))
be_rx_compl_process_gro(adapter, rxo, rxcp);
else
be_rx_compl_process(adapter, rxo, rxcp);
}
be_rx_compl_reset(rxcp);
}
@ -1830,8 +1884,7 @@ static void be_worker(struct work_struct *work)
be_post_rx_frags(rxo);
}
}
if (!adapter->ue_detected)
if (!adapter->ue_detected && !lancer_chip(adapter))
be_detect_dump_ue(adapter);
reschedule:
@ -1910,10 +1963,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
#endif
}
static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eq_obj)
{
return adapter->msix_entries[
be_evt_bit_get(adapter, eq_id)].vector;
return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
@ -1924,14 +1977,14 @@ static int be_request_irq(struct be_adapter *adapter,
int vec;
sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
vec = be_msix_vec_get(adapter, eq_obj->q.id);
vec = be_msix_vec_get(adapter, eq_obj);
return request_irq(vec, handler, 0, eq_obj->desc, context);
}
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
void *context)
{
int vec = be_msix_vec_get(adapter, eq_obj->q.id);
int vec = be_msix_vec_get(adapter, eq_obj);
free_irq(vec, context);
}
@ -2036,14 +2089,15 @@ static int be_close(struct net_device *netdev)
netif_carrier_off(netdev);
adapter->link_up = false;
be_intr_set(adapter, false);
if (!lancer_chip(adapter))
be_intr_set(adapter, false);
if (adapter->msix_enabled) {
vec = be_msix_vec_get(adapter, tx_eq->q.id);
vec = be_msix_vec_get(adapter, tx_eq);
synchronize_irq(vec);
for_all_rx_queues(adapter, rxo, i) {
vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
vec = be_msix_vec_get(adapter, &rxo->rx_eq);
synchronize_irq(vec);
}
} else {
@ -2082,7 +2136,8 @@ static int be_open(struct net_device *netdev)
be_irq_register(adapter);
be_intr_set(adapter, true);
if (!lancer_chip(adapter))
be_intr_set(adapter, true);
/* The evt queues are created in unarmed state; arm them */
for_all_rx_queues(adapter, rxo, i) {
@ -2343,10 +2398,10 @@ static int be_flash_data(struct be_adapter *adapter,
int num_bytes;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
struct flash_comp *pflashcomp;
const struct flash_comp *pflashcomp;
int num_comp;
struct flash_comp gen3_flash_types[9] = {
static const struct flash_comp gen3_flash_types[9] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g3},
{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
@ -2366,7 +2421,7 @@ static int be_flash_data(struct be_adapter *adapter,
{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
FLASH_NCSI_IMAGE_MAX_SIZE_g3}
};
struct flash_comp gen2_flash_types[8] = {
static const struct flash_comp gen2_flash_types[8] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
FLASH_IMAGE_MAX_SIZE_g2},
{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
@ -2388,11 +2443,11 @@ static int be_flash_data(struct be_adapter *adapter,
if (adapter->generation == BE_GEN3) {
pflashcomp = gen3_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g3);
num_comp = 9;
num_comp = ARRAY_SIZE(gen3_flash_types);
} else {
pflashcomp = gen2_flash_types;
filehdr_size = sizeof(struct flash_file_hdr_g2);
num_comp = 8;
num_comp = ARRAY_SIZE(gen2_flash_types);
}
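/* ARRAY_SIZE() replaces the hard-coded 9 and 8, so num_comp can no
 * longer drift from the tables; in the kernel it expands to roughly
 * sizeof(arr) / sizeof((arr)[0]) plus a compile-time array-type check.
 */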
for (i = 0; i < num_comp; i++) {
if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
@ -2543,10 +2598,15 @@ static void be_netdev_init(struct net_device *netdev)
int i;
netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_GRO | NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
if (lancer_chip(adapter))
netdev->vlan_features |= NETIF_F_TSO6;
netdev->flags |= IFF_MULTICAST;
@ -2587,6 +2647,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
u8 __iomem *addr;
int pcicfg_reg, db_reg;
if (lancer_chip(adapter)) {
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
pci_resource_len(adapter->pdev, 0));
if (addr == NULL)
return -ENOMEM;
adapter->db = addr;
return 0;
}
if (be_physfn(adapter)) {
addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
pci_resource_len(adapter->pdev, 2));
@ -2783,6 +2852,44 @@ static int be_get_config(struct be_adapter *adapter)
return 0;
}
static int be_dev_family_check(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
u32 sli_intf = 0, if_type;
switch (pdev->device) {
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
adapter->generation = BE_GEN2;
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID2:
adapter->generation = BE_GEN3;
break;
case OC_DEVICE_ID3:
pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
SLI_INTF_IF_TYPE_SHIFT;
if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
if_type != 0x02) {
dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
return -EINVAL;
}
if (num_vfs > 0) {
dev_err(&pdev->dev, "VFs not supported\n");
return -EINVAL;
}
adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
SLI_INTF_FAMILY_SHIFT);
adapter->generation = BE_GEN3;
break;
default:
adapter->generation = 0;
}
return 0;
}
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@ -2805,22 +2912,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto rel_reg;
}
adapter = netdev_priv(netdev);
switch (pdev->device) {
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
adapter->generation = BE_GEN2;
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID2:
adapter->generation = BE_GEN3;
break;
default:
adapter->generation = 0;
}
adapter->pdev = pdev;
pci_set_drvdata(pdev, adapter);
status = be_dev_family_check(adapter);
if (status)
goto free_netdev;
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
@ -2895,7 +2993,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_ctrl_cleanup(adapter);
free_netdev:
be_sriov_disable(adapter);
free_netdev(adapter->netdev);
free_netdev(netdev);
pci_set_drvdata(pdev, NULL);
rel_reg:
pci_release_regions(pdev);

View file

@ -112,16 +112,18 @@ struct bfa_ioc_pci_attr {
* IOC states
*/
enum bfa_ioc_state {
BFA_IOC_RESET = 1, /*!< IOC is in reset state */
BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */
BFA_IOC_GETATTR = 4, /*!< IOC is being configured */
BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */
BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */
BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */
BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */
BFA_IOC_DISABLED = 9, /*!< IOC is disabled */
BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */
BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
BFA_IOC_RESET = 2, /*!< IOC is in reset state */
BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */
BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */
BFA_IOC_GETATTR = 5, /*!< IOC is being configured */
BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */
BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */
BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */
BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */
BFA_IOC_DISABLED = 10, /*!< IOC is disabled */
BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */
BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */
};
/**

View file

@ -95,28 +95,6 @@ enum {
(type) == BFA_MFG_TYPE_CNA10P1 || \
bfa_mfg_is_mezz(type)))
/**
* Check if the card having old wwn/mac handling
*/
#define bfa_mfg_is_old_wwn_mac_model(type) (( \
(type) == BFA_MFG_TYPE_FC8P2 || \
(type) == BFA_MFG_TYPE_FC8P1 || \
(type) == BFA_MFG_TYPE_FC4P2 || \
(type) == BFA_MFG_TYPE_FC4P1 || \
(type) == BFA_MFG_TYPE_CNA10P2 || \
(type) == BFA_MFG_TYPE_CNA10P1 || \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE))
#define bfa_mfg_increment_wwn_mac(m, i) \
do { \
u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
t += (i); \
(m)[0] = (t >> 16) & 0xFF; \
(m)[1] = (t >> 8) & 0xFF; \
(m)[2] = t & 0xFF; \
} while (0)
#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
do { \
switch ((card_type)) { \

File diff suppressed because it is too large

View file

@ -26,16 +26,7 @@
#define BFA_IOC_TOV 3000 /* msecs */
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_HWINIT_MAX 2
#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
/**
* Generic Scatter Gather Element used by driver
*/
struct bfa_sge {
u32 sg_len;
void *sg_addr;
};
#define BFA_IOC_HWINIT_MAX 5
/**
* PCI device information required by IOC
@ -64,19 +55,6 @@ struct bfa_dma {
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
/**
* @brief BFA dma address assignment macro
*/
#define bfa_dma_addr_set(dma_addr, pa) \
__bfa_dma_addr_set(&dma_addr, (u64)pa)
static inline void
__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
dma_addr->a32.addr_lo = (u32) pa;
dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
}
/**
* @brief BFA dma address assignment macro. (big endian format)
*/
@ -105,8 +83,11 @@ struct bfa_ioc_regs {
void __iomem *host_page_num_fn;
void __iomem *heartbeat;
void __iomem *ioc_fwstate;
void __iomem *alt_ioc_fwstate;
void __iomem *ll_halt;
void __iomem *alt_ll_halt;
void __iomem *err_set;
void __iomem *ioc_fail_sync;
void __iomem *shirq_isr_next;
void __iomem *shirq_msk_next;
void __iomem *smem_page_start;
@ -165,16 +146,22 @@ struct bfa_ioc_hbfail_notify {
(__notify)->cbarg = (__cbarg); \
} while (0)
struct bfa_iocpf {
bfa_fsm_t fsm;
struct bfa_ioc *ioc;
u32 retry_count;
bool auto_recover;
};
struct bfa_ioc {
bfa_fsm_t fsm;
struct bfa *bfa;
struct bfa_pcidev pcidev;
struct bfa_timer_mod *timer_mod;
struct timer_list ioc_timer;
struct timer_list iocpf_timer;
struct timer_list sem_timer;
struct timer_list hb_timer;
u32 hb_count;
u32 retry_count;
struct list_head hb_notify_q;
void *dbg_fwsave;
int dbg_fwsave_len;
@ -182,7 +169,6 @@ struct bfa_ioc {
enum bfi_mclass ioc_mc;
struct bfa_ioc_regs ioc_regs;
struct bfa_ioc_drv_stats stats;
bool auto_recover;
bool fcmode;
bool ctdev;
bool cna;
@ -195,6 +181,7 @@ struct bfa_ioc {
struct bfa_ioc_cbfn *cbfn;
struct bfa_ioc_mbox_mod mbox_mod;
struct bfa_ioc_hwif *ioc_hwif;
struct bfa_iocpf iocpf;
};
struct bfa_ioc_hwif {
@ -205,8 +192,12 @@ struct bfa_ioc_hwif {
void (*ioc_map_port) (struct bfa_ioc *ioc);
void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
bool msix);
void (*ioc_notify_hbfail) (struct bfa_ioc *ioc);
void (*ioc_notify_fail) (struct bfa_ioc *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
void (*ioc_sync_join) (struct bfa_ioc *ioc);
void (*ioc_sync_leave) (struct bfa_ioc *ioc);
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
};
#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@ -271,7 +262,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
struct bfa_ioc_hbfail_notify *notify);
@ -289,7 +279,8 @@ mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
*/
void bfa_nw_ioc_timeout(void *ioc);
void bfa_nw_ioc_hb_check(void *ioc);
void bfa_nw_ioc_sem_timeout(void *ioc);
void bfa_nw_iocpf_timeout(void *ioc);
void bfa_nw_iocpf_sem_timeout(void *ioc);
/*
* F/W Image Size & Chunk

View file

@ -22,6 +22,15 @@
#include "bfi_ctreg.h"
#include "bfa_defs.h"
#define bfa_ioc_ct_sync_pos(__ioc) \
((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
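/* Layout implied by the accessors above: ioc_fail_sync holds one bit
 * per PCI function, sync_ackd in bits 0-15 and sync_reqd in bits
 * 16-31; e.g. function 2 has sync pos 0x00000004 and reqd pos
 * 0x00040000.
 */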
/*
* forward declarations
*/
@ -30,8 +39,12 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
static struct bfa_ioc_hwif nw_hwif_ct;
@ -48,8 +61,12 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
ioc->ioc_hwif = &nw_hwif_ct;
}
@ -86,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
if (usecnt == 0) {
writel(1, ioc->ioc_regs.ioc_usage_reg);
bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
writel(0, ioc->ioc_regs.ioc_fail_sync);
return true;
}
@ -149,12 +167,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
* Notify other functions on HB failure.
*/
static void
bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc)
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
if (ioc->cna) {
writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
/* Wait for halt to take effect */
readl(ioc->ioc_regs.ll_halt);
readl(ioc->ioc_regs.alt_ll_halt);
} else {
writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
readl(ioc->ioc_regs.err_set);
@ -206,15 +226,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
if (ioc->port_id == 0) {
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
} else {
ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
}
/*
@ -232,6 +256,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
/**
* sram memory access
@ -317,6 +342,77 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
bfa_nw_ioc_hw_sem_release(ioc);
}
/**
* Synchronized IOC failure processing routines
*/
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
bfa_ioc_ct_sync_pos(ioc);
writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
u32 tmp_ackd;
if (sync_ackd == 0)
return true;
/**
* The check below is to see whether any other PCI fn
* has reinitialized the ASIC (reset sync_ackd bits)
* and failed again while this IOC was waiting for hw
* semaphore (in bfa_iocpf_sm_semwait()).
*/
tmp_ackd = sync_ackd;
if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
if (sync_reqd == sync_ackd) {
writel(bfa_ioc_ct_clear_sync_ackd(r32),
ioc->ioc_regs.ioc_fail_sync);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
return true;
}
/**
* If another PCI fn reinitialized and failed again while
* this IOC was waiting for hw sem, the sync_ackd bit for
* this IOC need to be set again to allow reinitialization.
*/
if (tmp_ackd != sync_ackd)
writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
return false;
}
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
{

View file

@ -535,6 +535,7 @@ enum {
#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
#define CPE_DEPTH_Q(__n) \
(CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@ -552,22 +553,30 @@ enum {
(RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
#define RME_CI_PTR_Q(__n) \
(RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \
* (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \
* (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \
* (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \
* (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \
* (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \
* (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \
* (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \
* (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
#define HQM_QSET_RXQ_DRBL_P0(__n) \
(HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
(HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
#define HQM_QSET_TXQ_DRBL_P0(__n) \
(HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
(HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
#define HQM_QSET_IB_DRBL_1_P0(__n) \
(HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
(HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
#define HQM_QSET_IB_DRBL_2_P0(__n) \
(HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
(HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
#define HQM_QSET_RXQ_DRBL_P1(__n) \
(HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
(HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
#define HQM_QSET_TXQ_DRBL_P1(__n) \
(HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
(HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
#define HQM_QSET_IB_DRBL_1_P1(__n) \
(HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
(HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
#define HQM_QSET_IB_DRBL_2_P1(__n) \
(HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
(HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))

View file

@ -32,8 +32,6 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
/* Log string size */
#define BNA_MESSAGE_SIZE 256
#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
/* MBOX API for PORT, TX, RX */
#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
do { \
@ -390,8 +388,8 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
/* API for RX */
int bna_port_mtu_get(struct bna_port *port);
void bna_llport_admin_up(struct bna_llport *llport);
void bna_llport_admin_down(struct bna_llport *llport);
void bna_llport_rx_started(struct bna_llport *llport);
void bna_llport_rx_stopped(struct bna_llport *llport);
/* API for BNAD */
void bna_port_enable(struct bna_port *port);

View file

@ -59,14 +59,70 @@ bna_port_cb_link_down(struct bna_port *port, int status)
port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}
static inline int
llport_can_be_up(struct bna_llport *llport)
{
int ready = 0;
if (llport->type == BNA_PORT_T_REGULAR)
ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
(llport->flags & BNA_LLPORT_F_RX_STARTED) &&
(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
else
ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
(llport->flags & BNA_LLPORT_F_RX_STARTED) &&
!(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
return ready;
}
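/* A regular port needs all three flags (admin up, rx started, port
 * enabled); a non-regular port -- presumably loopback/diagnostic --
 * instead requires the physical port to be disabled first.
 */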
#define llport_is_up llport_can_be_up
enum bna_llport_event {
LLPORT_E_START = 1,
LLPORT_E_STOP = 2,
LLPORT_E_FAIL = 3,
LLPORT_E_UP = 4,
LLPORT_E_DOWN = 5,
LLPORT_E_FWRESP_UP_OK = 6,
LLPORT_E_FWRESP_UP_FAIL = 7,
LLPORT_E_FWRESP_DOWN = 8
};
static void
bna_llport_cb_port_enabled(struct bna_llport *llport)
{
llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
if (llport_can_be_up(llport))
bfa_fsm_send_event(llport, LLPORT_E_UP);
}
static void
bna_llport_cb_port_disabled(struct bna_llport *llport)
{
int llport_up = llport_is_up(llport);
llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
if (llport_up)
bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
/**
* MBOX
*/
static int
bna_is_aen(u8 msg_id)
{
return msg_id == BFI_LL_I2H_LINK_DOWN_AEN ||
msg_id == BFI_LL_I2H_LINK_UP_AEN;
switch (msg_id) {
case BFI_LL_I2H_LINK_DOWN_AEN:
case BFI_LL_I2H_LINK_UP_AEN:
case BFI_LL_I2H_PORT_ENABLE_AEN:
case BFI_LL_I2H_PORT_DISABLE_AEN:
return 1;
default:
return 0;
}
}
static void
@ -81,6 +137,12 @@ bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
case BFI_LL_I2H_LINK_DOWN_AEN:
bna_port_cb_link_down(&bna->port, aen->reason);
break;
case BFI_LL_I2H_PORT_ENABLE_AEN:
bna_llport_cb_port_enabled(&bna->port.llport);
break;
case BFI_LL_I2H_PORT_DISABLE_AEN:
bna_llport_cb_port_disabled(&bna->port.llport);
break;
default:
break;
}
@ -251,16 +313,6 @@ static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);
enum bna_llport_event {
LLPORT_E_START = 1,
LLPORT_E_STOP = 2,
LLPORT_E_FAIL = 3,
LLPORT_E_UP = 4,
LLPORT_E_DOWN = 5,
LLPORT_E_FWRESP_UP = 6,
LLPORT_E_FWRESP_DOWN = 7
};
enum bna_llport_state {
BNA_LLPORT_STOPPED = 1,
BNA_LLPORT_DOWN = 2,
@ -320,7 +372,7 @@ bna_llport_sm_stopped(struct bna_llport *llport,
/* No-op */
break;
case LLPORT_E_FWRESP_UP:
case LLPORT_E_FWRESP_UP_OK:
case LLPORT_E_FWRESP_DOWN:
/**
* These events are received due to flushing of mbox when
@ -366,6 +418,7 @@ bna_llport_sm_down(struct bna_llport *llport,
static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
{
BUG_ON(!llport_can_be_up(llport));
/**
* NOTE: Do not call bna_fw_llport_up() here. That will over step
* mbox due to down_resp_wait -> up_resp_wait transition on event
@ -390,10 +443,14 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport,
bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
break;
case LLPORT_E_FWRESP_UP:
case LLPORT_E_FWRESP_UP_OK:
bfa_fsm_set_state(llport, bna_llport_sm_up);
break;
case LLPORT_E_FWRESP_UP_FAIL:
bfa_fsm_set_state(llport, bna_llport_sm_down);
break;
case LLPORT_E_FWRESP_DOWN:
/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
bna_fw_llport_up(llport);
@ -431,11 +488,12 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport,
bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
break;
case LLPORT_E_FWRESP_UP:
case LLPORT_E_FWRESP_UP_OK:
/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
bna_fw_llport_down(llport);
break;
case LLPORT_E_FWRESP_UP_FAIL:
case LLPORT_E_FWRESP_DOWN:
bfa_fsm_set_state(llport, bna_llport_sm_down);
break;
@ -496,11 +554,12 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport,
/* No-op */
break;
case LLPORT_E_FWRESP_UP:
case LLPORT_E_FWRESP_UP_OK:
/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
bna_fw_llport_down(llport);
break;
case LLPORT_E_FWRESP_UP_FAIL:
case LLPORT_E_FWRESP_DOWN:
bfa_fsm_set_state(llport, bna_llport_sm_stopped);
break;
@ -541,7 +600,14 @@ bna_fw_cb_llport_up(void *arg, int status)
struct bna_llport *llport = (struct bna_llport *)arg;
bfa_q_qe_init(&llport->mbox_qe.qe);
bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP);
if (status == BFI_LL_CMD_FAIL) {
if (llport->type == BNA_PORT_T_REGULAR)
llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
else
llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
} else
bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
}
static void
@ -588,13 +654,14 @@ bna_port_cb_llport_stopped(struct bna_port *port,
static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
{
llport->flags |= BNA_LLPORT_F_ENABLED;
llport->flags |= BNA_LLPORT_F_ADMIN_UP;
llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
llport->type = BNA_PORT_T_REGULAR;
llport->bna = bna;
llport->link_status = BNA_LINK_DOWN;
llport->admin_up_count = 0;
llport->rx_started_count = 0;
llport->stop_cbfn = NULL;
@ -606,7 +673,8 @@ bna_llport_init(struct bna_llport *llport, struct bna *bna)
static void
bna_llport_uninit(struct bna_llport *llport)
{
llport->flags &= ~BNA_LLPORT_F_ENABLED;
llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
llport->bna = NULL;
}
@ -628,6 +696,8 @@ bna_llport_stop(struct bna_llport *llport)
static void
bna_llport_fail(struct bna_llport *llport)
{
/* Reset the physical port status to enabled */
llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}
@ -638,25 +708,31 @@ bna_llport_state_get(struct bna_llport *llport)
}
void
bna_llport_admin_up(struct bna_llport *llport)
bna_llport_rx_started(struct bna_llport *llport)
{
llport->admin_up_count++;
llport->rx_started_count++;
if (llport->admin_up_count == 1) {
llport->flags |= BNA_LLPORT_F_RX_ENABLED;
if (llport->flags & BNA_LLPORT_F_ENABLED)
if (llport->rx_started_count == 1) {
llport->flags |= BNA_LLPORT_F_RX_STARTED;
if (llport_can_be_up(llport))
bfa_fsm_send_event(llport, LLPORT_E_UP);
}
}
void
bna_llport_admin_down(struct bna_llport *llport)
bna_llport_rx_stopped(struct bna_llport *llport)
{
llport->admin_up_count--;
int llport_up = llport_is_up(llport);
if (llport->admin_up_count == 0) {
llport->flags &= ~BNA_LLPORT_F_RX_ENABLED;
if (llport->flags & BNA_LLPORT_F_ENABLED)
llport->rx_started_count--;
if (llport->rx_started_count == 0) {
llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
if (llport_up)
bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
}
@ -2056,37 +2132,6 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
static void
__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
{
struct bna_rx_fndb_ram *rx_fndb_ram;
u32 ctrl_flags;
int i;
rx_fndb_ram = (struct bna_rx_fndb_ram *)
BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
RX_FNDB_RAM_BASE_OFFSET);
for (i = 0; i < BFI_MAX_RXF; i++) {
if (status == BNA_STATUS_T_ENABLED) {
if (i == rxf->rxf_id)
continue;
ctrl_flags =
readl(&rx_fndb_ram[i].control_flags);
ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
writel(ctrl_flags,
&rx_fndb_ram[i].control_flags);
} else {
ctrl_flags =
readl(&rx_fndb_ram[i].control_flags);
ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
writel(ctrl_flags,
&rx_fndb_ram[i].control_flags);
}
}
}
int
rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
{
@ -2152,46 +2197,6 @@ rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
return 0;
}
int
rxf_process_packet_filter_default(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
/* Enable/disable default mode */
if (is_default_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* move default configuration from pending -> active */
default_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
/* Disable VLAN filter to allow all VLANs */
__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
/* Redirect all other RxF vlan filtering to this one */
__rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
BNA_STATUS_T_ENABLED);
return 1;
} else if (is_default_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* move default configuration from pending -> active */
default_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
bna->rxf_default_id = BFI_MAX_RXF;
/* Revert VLAN filter */
__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
/* Stop RxF vlan filter table redirection */
__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
BNA_STATUS_T_DISABLED);
return 1;
}
return 0;
}
int
rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
{
@ -2288,48 +2293,6 @@ rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
return 0;
}
int
rxf_clear_packet_filter_default(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
/* 8. Execute pending default mode disable command */
if (is_default_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* move default configuration from pending -> active */
default_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
bna->rxf_default_id = BFI_MAX_RXF;
/* Revert VLAN filter */
__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
/* Stop RxF vlan filter table redirection */
__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
BNA_STATUS_T_DISABLED);
return 1;
}
/* 9. Clear active default mode; move it to pending enable */
if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
/* move default configuration from active -> pending */
default_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
/* Revert VLAN filter */
__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
/* Stop RxF vlan filter table redirection */
__rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
BNA_STATUS_T_DISABLED);
return 1;
}
return 0;
}
int
rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
{
@ -2404,28 +2367,6 @@ rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
}
void
rxf_reset_packet_filter_default(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
/* 8. Clear pending default mode disable */
if (is_default_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
default_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
bna->rxf_default_id = BFI_MAX_RXF;
}
/* 9. Move default mode config from active -> pending */
if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
default_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
}
}
void
rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
{
@ -2515,76 +2456,6 @@ rxf_promisc_disable(struct bna_rxf *rxf)
return ret;
}
/**
* Should only be called by bna_rxf_mode_set.
* Helps deciding if h/w configuration is needed or not.
* Returns:
* 0 = no h/w change
* 1 = need h/w change
*/
static int
rxf_default_enable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
int ret = 0;
/* There can not be any pending disable command */
/* Do nothing if pending enable or already enabled */
if (is_default_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask) ||
(rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
/* Schedule enable */
} else {
/* Default mode should not be active in the system */
default_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
bna->rxf_default_id = rxf->rxf_id;
ret = 1;
}
return ret;
}
/**
* Should only be called by bna_rxf_mode_set.
* Helps deciding if h/w configuration is needed or not.
* Returns:
* 0 = no h/w change
* 1 = need h/w change
*/
static int
rxf_default_disable(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
int ret = 0;
/* There can not be any pending disable */
/* Turn off pending enable command, if any */
if (is_default_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* Promisc mode should not be active */
/* system default state should be pending */
default_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
/* Remove the default state from the system */
bna->rxf_default_id = BFI_MAX_RXF;
/* Schedule disable */
} else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
/* Default mode should be active in the system */
default_disable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
ret = 1;
/* Do nothing if already disabled */
} else {
}
return ret;
}
/**
* Should only be called by bna_rxf_mode_set.
* Helps deciding if h/w configuration is needed or not.
@ -2654,38 +2525,13 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
struct bna_rxf *rxf = &rx->rxf;
int need_hw_config = 0;
/* Error checks */
/* Process the commands */
if (is_promisc_enable(new_mode, bitmask)) {
/* If promisc mode is already enabled elsewhere in the system */
if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
(rx->bna->rxf_promisc_id != rxf->rxf_id))
goto err_return;
/* If default mode is already enabled in the system */
if (rx->bna->rxf_default_id != BFI_MAX_RXF)
goto err_return;
/* Trying to enable promiscuous and default mode together */
if (is_default_enable(new_mode, bitmask))
goto err_return;
}
if (is_default_enable(new_mode, bitmask)) {
/* If default mode is already enabled elsewhere in the system */
if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
(rx->bna->rxf_default_id != rxf->rxf_id)) {
goto err_return;
}
/* If promiscuous mode is already enabled in the system */
if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
goto err_return;
}
/* Process the commands */
if (is_promisc_enable(new_mode, bitmask)) {
if (rxf_promisc_enable(rxf))
need_hw_config = 1;
} else if (is_promisc_disable(new_mode, bitmask)) {
@ -2693,14 +2539,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
need_hw_config = 1;
}
if (is_default_enable(new_mode, bitmask)) {
if (rxf_default_enable(rxf))
need_hw_config = 1;
} else if (is_default_disable(new_mode, bitmask)) {
if (rxf_default_disable(rxf))
need_hw_config = 1;
}
if (is_allmulti_enable(new_mode, bitmask)) {
if (rxf_allmulti_enable(rxf))
need_hw_config = 1;
@ -3126,7 +2964,6 @@ bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
bna->rxf_default_id = BFI_MAX_RXF;
bna->rxf_promisc_id = BFI_MAX_RXF;
/* Mbox q element for posting stat request to f/w */

View file

@ -1226,8 +1226,7 @@ rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
/* Apply the VLAN filter */
if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
!(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
}
@ -1276,9 +1275,6 @@ rxf_process_packet_filter(struct bna_rxf *rxf)
if (rxf_process_packet_filter_promisc(rxf))
return 1;
if (rxf_process_packet_filter_default(rxf))
return 1;
if (rxf_process_packet_filter_allmulti(rxf))
return 1;
@ -1340,9 +1336,6 @@ rxf_clear_packet_filter(struct bna_rxf *rxf)
if (rxf_clear_packet_filter_promisc(rxf))
return 1;
if (rxf_clear_packet_filter_default(rxf))
return 1;
if (rxf_clear_packet_filter_allmulti(rxf))
return 1;
@ -1389,8 +1382,6 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
rxf_reset_packet_filter_promisc(rxf);
rxf_reset_packet_filter_default(rxf);
rxf_reset_packet_filter_allmulti(rxf);
}
@ -1441,12 +1432,16 @@ bna_rxf_init(struct bna_rxf *rxf,
memset(rxf->vlan_filter_table, 0,
(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
/* Set up VLAN 0 for pure priority tagged packets */
rxf->vlan_filter_table[0] |= 1;
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
struct bna_mac *mac;
bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
@ -1473,6 +1468,27 @@ bna_rxf_uninit(struct bna_rxf *rxf)
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
}
/* Turn off pending promisc mode */
if (is_promisc_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* system promisc state should be pending */
BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
promisc_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
bna->rxf_promisc_id = BFI_MAX_RXF;
}
/* Promisc mode should not be active */
BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
/* Turn off pending all-multi mode */
if (is_allmulti_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
allmulti_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
}
/* Allmulti mode should not be active */
BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
rxf->rx = NULL;
}
@ -1947,7 +1963,7 @@ bna_rx_sm_started_entry(struct bna_rx *rx)
bna_ib_ack(&rxp->cq.ib->door_bell, 0);
}
bna_llport_admin_up(&rx->bna->port.llport);
bna_llport_rx_started(&rx->bna->port.llport);
}
void
@ -1955,13 +1971,13 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_FAIL:
bna_llport_admin_down(&rx->bna->port.llport);
bna_llport_rx_stopped(&rx->bna->port.llport);
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
rx_ib_fail(rx);
bna_rxf_fail(&rx->rxf);
break;
case RX_E_STOP:
bna_llport_admin_down(&rx->bna->port.llport);
bna_llport_rx_stopped(&rx->bna->port.llport);
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
break;
default:
@ -3373,7 +3389,7 @@ __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
(txq->priority & 0x3));
(txq->priority & 0x7));
txq_cfg.wvc_n_cquota_n_rquota =
((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
(BFI_TX_MAX_WRR_QUOTA & 0xfff));
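[Editor's note: the hunk above widens the priority mask from 0x3 to 0x7 to match a 3-bit priority field. A standalone sketch of the packing, with hypothetical names, showing what the old mask truncated:]

#include <stdio.h>
#include <stdint.h>

/* txf_id occupies 6 bits starting at bit 3; priority the low 3 bits.
 * The old 0x3 mask silently dropped the top priority bit (4-7). */
static uint32_t pack_qid_fid_pri(uint32_t txf_id, uint32_t priority)
{
	return ((txf_id & 0x3f) << 3) | (priority & 0x7);
}

int main(void)
{
	printf("0x%x\n", pack_qid_fid_pri(5, 7));	/* 0x2f; old mask gave 0x2b */
	return 0;
}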

View file

@ -165,8 +165,7 @@ enum bna_rxp_type {
enum bna_rxmode {
BNA_RXMODE_PROMISC = 1,
BNA_RXMODE_DEFAULT = 2,
BNA_RXMODE_ALLMULTI = 4
BNA_RXMODE_ALLMULTI = 2
};
enum bna_rx_event {
@ -249,8 +248,9 @@ enum bna_link_status {
};
enum bna_llport_flags {
BNA_LLPORT_F_ENABLED = 1,
BNA_LLPORT_F_RX_ENABLED = 2
BNA_LLPORT_F_ADMIN_UP = 1,
BNA_LLPORT_F_PORT_ENABLED = 2,
BNA_LLPORT_F_RX_STARTED = 4
};
enum bna_port_flags {
@ -405,7 +405,7 @@ struct bna_llport {
enum bna_link_status link_status;
int admin_up_count;
int rx_started_count;
void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
@ -1117,7 +1117,6 @@ struct bna {
struct bna_rit_mod rit_mod;
int rxf_default_id;
int rxf_promisc_id;
struct bna_mbox_qe mbox_qe;

View file

@ -70,6 +70,8 @@ do { \
(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
} while (0)
#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@ -107,7 +109,7 @@ static void
bnad_free_all_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
u16 unmap_cons;
u32 unmap_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb = NULL;
@ -130,7 +132,9 @@ bnad_free_all_txbufs(struct bnad *bnad,
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
unmap_cons++;
if (++unmap_cons >= unmap_q->q_depth)
break;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
pci_unmap_page(bnad->pcidev,
pci_unmap_addr(&unmap_array[unmap_cons],
@ -139,7 +143,8 @@ bnad_free_all_txbufs(struct bnad *bnad,
PCI_DMA_TODEVICE);
pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
unmap_cons++;
if (++unmap_cons >= unmap_q->q_depth)
break;
}
dev_kfree_skb_any(skb);
}
@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad,
/*
* Just return if TX is stopped. This check is useful
* when bnad_free_txbufs() runs from a tasklet scheduled
* before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
* before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
* but this routine actually runs after the cleanup has been
* executed.
*/
if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
return 0;
updated_hw_cons = *(tcb->hw_consumer_index);
@ -239,7 +244,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
struct bnad *bnad = (struct bnad *)bnad_ptr;
struct bna_tcb *tcb;
u32 acked;
u32 acked = 0;
int i, j;
for (i = 0; i < bnad->num_tx; i++) {
@ -252,10 +257,26 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
&tcb->flags))) {
acked = bnad_free_txbufs(bnad, tcb);
bna_ib_ack(tcb->i_dbell, acked);
if (likely(test_bit(BNAD_TXQ_TX_STARTED,
&tcb->flags)))
bna_ib_ack(tcb->i_dbell, acked);
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
}
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
&tcb->flags)))
continue;
if (netif_queue_stopped(bnad->netdev)) {
if (acked && netif_carrier_ok(bnad->netdev) &&
BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
BNAD_NETIF_WAKE_THRESHOLD) {
netif_wake_queue(bnad->netdev);
/* TODO */
/* Counters for individual TxQs? */
BNAD_UPDATE_CTR(bnad,
netif_queue_wakeup);
}
}
}
}
}
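[Editor's note: the tasklet above now wakes the stack's Tx queue only under a combined condition. A hedged restatement of that predicate, with stand-in parameter names:]

/* Wake only if the queue is currently stopped, the carrier is up,
 * something was just acked, and enough descriptors are free; this
 * mirrors the checks added above (all names are stand-ins). */
static int should_wake_queue(int queue_stopped, int carrier_ok,
			     unsigned int acked, unsigned int free_cnt,
			     unsigned int wake_threshold)
{
	return queue_stopped && carrier_ok && acked &&
	       free_cnt >= wake_threshold;
}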
@ -264,7 +285,7 @@ static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
struct net_device *netdev = bnad->netdev;
u32 sent;
u32 sent = 0;
if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
return 0;
@ -275,12 +296,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
netif_carrier_ok(netdev) &&
BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
BNAD_NETIF_WAKE_THRESHOLD) {
netif_wake_queue(netdev);
BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
netif_wake_queue(netdev);
BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
}
}
}
if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
bna_ib_ack(tcb->i_dbell, sent);
} else
bna_ib_ack(tcb->i_dbell, 0);
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
@ -313,25 +337,24 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
}
static void
bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q;
struct sk_buff *skb;
int unmap_cons;
unmap_q = rcb->unmap_q;
while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
BUG_ON(!(skb));
unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
skb = unmap_q->unmap_array[unmap_cons].skb;
if (!skb)
continue;
unmap_q->unmap_array[unmap_cons].skb = NULL;
pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
unmap_array[unmap_q->consumer_index],
dma_addr), rcb->rxq->buffer_size +
NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
unmap_array[unmap_cons],
dma_addr), rcb->rxq->buffer_size,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(skb);
BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
}
bnad_reset_rcb(bnad, rcb);
}
@ -385,43 +408,11 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
unmap_q->producer_index = unmap_prod;
rcb->producer_index = unmap_prod;
smp_mb();
bna_rxq_prod_indx_doorbell(rcb);
if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
bna_rxq_prod_indx_doorbell(rcb);
}
}
/*
* Locking is required in the enable path
* because it is called from a napi poll
* context, where the bna_lock is not held
* unlike the IRQ context.
*/
static void
bnad_enable_txrx_irqs(struct bnad *bnad)
{
struct bna_tcb *tcb;
struct bna_ccb *ccb;
int i, j;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
for (i = 0; i < bnad->num_tx; i++) {
for (j = 0; j < bnad->num_txq_per_tx; j++) {
tcb = bnad->tx_info[i].tcb[j];
bna_ib_coalescing_timer_set(tcb->i_dbell,
tcb->txq->ib->ib_config.coalescing_timeo);
bna_ib_ack(tcb->i_dbell, 0);
}
}
for (i = 0; i < bnad->num_rx; i++) {
for (j = 0; j < bnad->num_rxp_per_rx; j++) {
ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
bnad_enable_rx_irq_unsafe(ccb);
}
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
@ -448,6 +439,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
return 0;
prefetch(bnad->netdev);
BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
wi_range);
@ -544,12 +538,15 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
if (likely(ccb)) {
bna_ib_ack(ccb->i_dbell, packets);
if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
bna_ib_ack(ccb->i_dbell, packets);
bnad_refill_rxq(bnad, ccb->rcb[0]);
if (ccb->rcb[1])
bnad_refill_rxq(bnad, ccb->rcb[1]);
} else
bna_ib_ack(ccb->i_dbell, 0);
} else {
if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
bna_ib_ack(ccb->i_dbell, 0);
}
return packets;
}
@ -557,6 +554,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
return;
bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
bna_ib_ack(ccb->i_dbell, 0);
}
@ -566,7 +566,8 @@ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
/* Because of polling context */
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_enable_rx_irq_unsafe(ccb);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@ -575,9 +576,11 @@ static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
struct napi_struct *napi = &rx_ctrl->napi;
if (likely(napi_schedule_prep(napi))) {
bnad_disable_rx_irq(bnad, ccb);
__napi_schedule((&rx_ctrl->napi));
__napi_schedule(napi);
}
BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}
@ -602,12 +605,11 @@ bnad_msix_mbox_handler(int irq, void *data)
{
u32 intr_status;
unsigned long flags;
struct net_device *netdev = data;
struct bnad *bnad;
struct bnad *bnad = (struct bnad *)data;
bnad = netdev_priv(netdev);
if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
return IRQ_HANDLED;
/* BNA_ISR_GET(bnad); Inc Ref count */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_intr_status_get(&bnad->bna, intr_status);
@ -617,7 +619,6 @@ bnad_msix_mbox_handler(int irq, void *data)
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* BNAD_ISR_PUT(bnad); Dec Ref count */
return IRQ_HANDLED;
}
@ -627,8 +628,7 @@ bnad_isr(int irq, void *data)
int i, j;
u32 intr_status;
unsigned long flags;
struct net_device *netdev = data;
struct bnad *bnad = netdev_priv(netdev);
struct bnad *bnad = (struct bnad *)data;
struct bnad_rx_info *rx_info;
struct bnad_rx_ctrl *rx_ctrl;
@ -642,16 +642,21 @@ bnad_isr(int irq, void *data)
spin_lock_irqsave(&bnad->bna_lock, flags);
if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
if (BNA_IS_MBOX_ERR_INTR(intr_status))
bna_mbox_handler(&bnad->bna, intr_status);
if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
spin_unlock_irqrestore(&bnad->bna_lock, flags);
goto done;
}
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (!BNA_IS_INTX_DATA_INTR(intr_status))
return IRQ_HANDLED;
/* Process data interrupts */
/* Tx processing */
for (i = 0; i < bnad->num_tx; i++) {
for (j = 0; j < bnad->num_txq_per_tx; j++)
bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
}
/* Rx processing */
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
if (!rx_info->rx)
@ -663,7 +668,6 @@ bnad_isr(int irq, void *data)
rx_ctrl->ccb);
}
}
done:
return IRQ_HANDLED;
}
@ -674,11 +678,7 @@ bnad_isr(int irq, void *data)
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
int irq = BNAD_GET_MBOX_IRQ(bnad);
if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
if (bnad->cfg_flags & BNAD_CF_MSIX)
enable_irq(irq);
clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}
@ -690,16 +690,21 @@ bnad_enable_mbox_irq(struct bnad *bnad)
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
int irq = BNAD_GET_MBOX_IRQ(bnad);
if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
if (bnad->cfg_flags & BNAD_CF_MSIX)
disable_irq_nosync(irq);
set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
if (is_zero_ether_addr(netdev->dev_addr))
memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}
/* Control Path Handlers */
/* Callbacks */
@ -755,11 +760,14 @@ bnad_cb_port_link_status(struct bnad *bnad,
if (link_up) {
if (!netif_carrier_ok(bnad->netdev)) {
struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
if (!tcb)
return;
pr_warn("bna: %s link up\n",
bnad->netdev->name);
netif_carrier_on(bnad->netdev);
BNAD_UPDATE_CTR(bnad, link_toggle);
if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
/* Force an immediate Transmit Schedule */
pr_info("bna: %s TX_STARTED\n",
bnad->netdev->name);
@ -807,6 +815,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
struct bnad_tx_info *tx_info =
(struct bnad_tx_info *)tcb->txq->tx->priv;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
cpu_relax();
bnad_free_all_txbufs(bnad, tcb);
unmap_q->producer_index = 0;
unmap_q->consumer_index = 0;
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
tx_info->tcb[tcb->id] = NULL;
}
@ -821,6 +841,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}
static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
bnad_free_all_rxbufs(bnad, rcb);
}
static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
@ -849,7 +875,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
if (tx_info != &bnad->tx_info[0])
return;
clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
netif_stop_queue(bnad->netdev);
pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}
@ -857,9 +883,36 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
return;
clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
cpu_relax();
bnad_free_all_txbufs(bnad, tcb);
unmap_q->producer_index = 0;
unmap_q->consumer_index = 0;
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
/*
* Workaround for the first device enable failure, where we
* get a zero MAC address. We try to get the MAC address
* again here.
*/
if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
bnad_set_netdev_perm_addr(bnad);
}
set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
if (netif_carrier_ok(bnad->netdev)) {
pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
netif_wake_queue(bnad->netdev);
@ -870,40 +923,22 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
struct bnad_unmap_q *unmap_q;
if (!tcb || (!tcb->unmap_q))
return;
unmap_q = tcb->unmap_q;
if (!unmap_q->unmap_array)
return;
if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
return;
bnad_free_all_txbufs(bnad, tcb);
unmap_q->producer_index = 0;
unmap_q->consumer_index = 0;
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
/* Delay only once for the whole Tx Path Shutdown */
if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
mdelay(BNAD_TXRX_SYNC_MDELAY);
}
static void
bnad_cb_rx_cleanup(struct bnad *bnad,
struct bna_ccb *ccb)
{
bnad_cq_cmpl_init(bnad, ccb);
bnad_free_rxbufs(bnad, ccb->rcb[0]);
clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
if (ccb->rcb[1]) {
bnad_free_rxbufs(bnad, ccb->rcb[1]);
if (ccb->rcb[1])
clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
}
if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
mdelay(BNAD_TXRX_SYNC_MDELAY);
}
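[Editor's note: the Tx and Rx cleanup paths above pay the BNAD_TXRX_SYNC_MDELAY only once by racing on a run flag. A userspace analogue of that run-once idiom, assuming C11 atomics:]

#include <stdatomic.h>

static atomic_flag shutdown_delayed = ATOMIC_FLAG_INIT;

/* atomic_flag_test_and_set() returns the previous value, so only the
 * first caller performs the expensive delay, just as
 * test_and_set_bit() gates the mdelay() above. */
static void sync_once(void (*delay_fn)(void))
{
	if (!atomic_flag_test_and_set(&shutdown_delayed))
		delay_fn();
}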
static void
@ -911,6 +946,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q = rcb->unmap_q;
clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
if (rcb == rcb->cq->ccb->rcb[0])
bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
bnad_free_all_rxbufs(bnad, rcb);
set_bit(BNAD_RXQ_STARTED, &rcb->flags);
/* Now allocate & post buffers for this RCB */
@ -1047,7 +1089,7 @@ bnad_mbox_irq_free(struct bnad *bnad,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
irq = BNAD_GET_MBOX_IRQ(bnad);
free_irq(irq, bnad->netdev);
free_irq(irq, bnad);
kfree(intr_info->idl);
}
@ -1061,7 +1103,7 @@ static int
bnad_mbox_irq_alloc(struct bnad *bnad,
struct bna_intr_info *intr_info)
{
int err;
int err = 0;
unsigned long flags;
u32 irq;
irq_handler_t irq_handler;
@ -1096,22 +1138,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
*/
set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
err = request_irq(irq, irq_handler, flags,
bnad->mbox_irq_name, bnad->netdev);
bnad->mbox_irq_name, bnad);
if (err) {
kfree(intr_info->idl);
intr_info->idl = NULL;
return err;
}
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->cfg_flags & BNAD_CF_MSIX)
disable_irq_nosync(irq);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
return 0;
return err;
}
static void
@ -1388,13 +1425,24 @@ bnad_ioc_hb_check(unsigned long data)
}
static void
bnad_ioc_sem_timeout(unsigned long data)
bnad_iocpf_timeout(unsigned long data)
{
struct bnad *bnad = (struct bnad *)data;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc);
bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
static void
bnad_iocpf_sem_timeout(unsigned long data)
{
struct bnad *bnad = (struct bnad *)data;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@ -1555,62 +1603,19 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
return rcvd;
}
static int
bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
{
struct bnad_rx_ctrl *rx_ctrl =
container_of(napi, struct bnad_rx_ctrl, napi);
struct bna_ccb *ccb;
struct bnad *bnad;
int rcvd = 0;
int i, j;
ccb = rx_ctrl->ccb;
bnad = ccb->bnad;
if (!netif_carrier_ok(bnad->netdev))
goto poll_exit;
/* Handle Tx Completions, if any */
for (i = 0; i < bnad->num_tx; i++) {
for (j = 0; j < bnad->num_txq_per_tx; j++)
bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
}
/* Handle Rx Completions */
rcvd = bnad_poll_cq(bnad, ccb, budget);
if (rcvd == budget)
return rcvd;
poll_exit:
napi_complete((napi));
BNAD_UPDATE_CTR(bnad, netif_rx_complete);
bnad_enable_txrx_irqs(bnad);
return rcvd;
}
static void
bnad_napi_enable(struct bnad *bnad, u32 rx_id)
{
int (*napi_poll) (struct napi_struct *, int);
struct bnad_rx_ctrl *rx_ctrl;
int i;
unsigned long flags;
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->cfg_flags & BNAD_CF_MSIX)
napi_poll = bnad_napi_poll_rx;
else
napi_poll = bnad_napi_poll_txrx;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
/* Initialize & enable NAPI */
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
napi_poll, 64);
bnad_napi_poll_rx, 64);
napi_enable(&rx_ctrl->napi);
}
}
@ -1825,6 +1830,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
@ -1968,6 +1974,27 @@ bnad_enable_default_bcast(struct bnad *bnad)
return 0;
}
/* Called with bnad_conf_lock() held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
u16 vlan_id;
unsigned long flags;
if (!bnad->vlan_grp)
return;
BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
continue;
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
}
/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
@ -2152,16 +2179,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
bnad->num_rxp_per_rx = 1;
}
static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
if (is_zero_ether_addr(netdev->dev_addr))
memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}
/* Enable / disable device */
static void
bnad_device_disable(struct bnad *bnad)
@ -2353,6 +2370,9 @@ bnad_open(struct net_device *netdev)
/* Enable broadcast */
bnad_enable_default_bcast(bnad);
/* Restore VLANs, if any */
bnad_restore_vlans(bnad, 0);
/* Set the UCAST address */
spin_lock_irqsave(&bnad->bna_lock, flags);
bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
@ -2433,21 +2453,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_stop_queue() call.
*/
if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
tx_id = 0;
tx_info = &bnad->tx_info[tx_id];
tcb = tx_info->tcb[tx_id];
unmap_q = tcb->unmap_q;
/*
* Takes care of the Tx that is scheduled between clearing the flag
* and the netif_stop_queue() call.
*/
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
vectors = 1 + skb_shinfo(skb)->nr_frags;
if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
dev_kfree_skb(skb);
@ -2462,7 +2482,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
tcb->consumer_index &&
!test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
acked = bnad_free_txbufs(bnad, tcb);
bna_ib_ack(tcb->i_dbell, acked);
if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
bna_ib_ack(tcb->i_dbell, acked);
smp_mb__before_clear_bit();
clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
} else {
@ -2624,6 +2645,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
tcb->producer_index = txq_prod;
smp_mb();
if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
return NETDEV_TX_OK;
bna_txq_prod_indx_doorbell(tcb);
if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
@ -3032,7 +3057,7 @@ static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
bool using_dac;
bool using_dac = false;
int err;
struct bnad *bnad;
struct bna *bna;
@ -3066,7 +3091,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/*
* PCI initialization
* Output : using_dac = 1 for 64 bit DMA
* = 0 for 32 bit DMA
* = 0 for 32 bit DMA
*/
err = bnad_pci_init(bnad, pdev, &using_dac);
if (err)
@ -3084,6 +3109,9 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Initialize netdev structure, set up ethtool ops */
bnad_netdev_init(bnad, using_dac);
/* Set link to down state */
netif_carrier_off(netdev);
bnad_enable_msix(bnad);
/* Get resource requirement form bna */
@ -3115,11 +3143,13 @@ bnad_pci_probe(struct pci_dev *pdev,
((unsigned long)bnad));
setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
((unsigned long)bnad));
setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout,
setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
((unsigned long)bnad));
setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
((unsigned long)bnad));
/* Now start the timer before calling IOC */
mod_timer(&bnad->bna.device.ioc.ioc_timer,
mod_timer(&bnad->bna.device.ioc.iocpf_timer,
jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
/*
@ -3137,11 +3167,6 @@ bnad_pci_probe(struct pci_dev *pdev,
mutex_unlock(&bnad->conf_mutex);
/*
* Make sure the link appears down to the stack
*/
netif_carrier_off(netdev);
/* Finally, reguister with net_device layer */
err = register_netdev(netdev);
if (err) {

View file

@ -51,6 +51,7 @@
*/
struct bnad_rx_ctrl {
struct bna_ccb *ccb;
unsigned long flags;
struct napi_struct napi;
};
@ -64,7 +65,7 @@ struct bnad_rx_ctrl {
#define BNAD_NAME "bna"
#define BNAD_NAME_LEN 64
#define BNAD_VERSION "2.3.2.0"
#define BNAD_VERSION "2.3.2.3"
#define BNAD_MAILBOX_MSIX_VECTORS 1
@ -82,6 +83,7 @@ struct bnad_rx_ctrl {
/* Bit positions for tcb->flags */
#define BNAD_TXQ_FREE_SENT 0
#define BNAD_TXQ_TX_STARTED 1
/* Bit positions for rcb->flags */
#define BNAD_RXQ_REFILL 0
@ -124,6 +126,7 @@ struct bnad_completion {
struct bnad_drv_stats {
u64 netif_queue_stop;
u64 netif_queue_wakeup;
u64 netif_queue_stopped;
u64 tso4;
u64 tso6;
u64 tso_err;
@ -199,12 +202,12 @@ struct bnad_unmap_q {
/* Set, tested & cleared using xxx_bit() functions */
/* Values indicated bit positions */
#define BNAD_RF_CEE_RUNNING 1
#define BNAD_RF_HW_ERROR 2
#define BNAD_RF_MBOX_IRQ_DISABLED 3
#define BNAD_RF_TX_STARTED 4
#define BNAD_RF_RX_STARTED 5
#define BNAD_RF_DIM_TIMER_RUNNING 6
#define BNAD_RF_STATS_TIMER_RUNNING 7
#define BNAD_RF_MBOX_IRQ_DISABLED 2
#define BNAD_RF_RX_STARTED 3
#define BNAD_RF_DIM_TIMER_RUNNING 4
#define BNAD_RF_STATS_TIMER_RUNNING 5
#define BNAD_RF_TX_SHUTDOWN_DELAYED 6
#define BNAD_RF_RX_SHUTDOWN_DELAYED 7
struct bnad {
struct net_device *netdev;
@ -306,8 +309,10 @@ extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
extern void bnad_dim_timer_start(struct bnad *bnad);
/* Statistics */
extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats);
extern void bnad_netdev_qstats_fill(struct bnad *bnad,
struct rtnl_link_stats64 *stats);
extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
struct rtnl_link_stats64 *stats);
/**
* MACROS
@ -320,9 +325,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64
#define bnad_enable_rx_irq_unsafe(_ccb) \
{ \
bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
(_ccb)->rx_coalescing_timeo); \
bna_ib_ack((_ccb)->i_dbell, 0); \
if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
(_ccb)->rx_coalescing_timeo); \
bna_ib_ack((_ccb)->i_dbell, 0); \
} \
}
#define bnad_dim_timer_running(_bnad) \

View file

@ -68,6 +68,7 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"netif_queue_stop",
"netif_queue_wakeup",
"netif_queue_stopped",
"tso4",
"tso6",
"tso_err",
@ -330,10 +331,6 @@ do { \
BNAD_GET_REG(PCIE_MISC_REG);
BNAD_GET_REG(HOST_SEM0_REG);
BNAD_GET_REG(HOST_SEM1_REG);
BNAD_GET_REG(HOST_SEM2_REG);
BNAD_GET_REG(HOST_SEM3_REG);
BNAD_GET_REG(HOST_SEM0_INFO_REG);
BNAD_GET_REG(HOST_SEM1_INFO_REG);
BNAD_GET_REG(HOST_SEM2_INFO_REG);
@ -1184,6 +1181,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
bi = sizeof(*net_stats64) / sizeof(u64);
/* Get netif_queue_stopped from stack */
bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
/* Fill driver stats into ethtool buffers */
stats64 = (u64 *)&bnad->stats.drv_stats;
for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)

View file

@ -56,11 +56,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
#define DRV_MODULE_VERSION "2.0.18"
#define DRV_MODULE_RELDATE "Oct 7, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw"
#define DRV_MODULE_VERSION "2.0.21"
#define DRV_MODULE_RELDATE "Dec 23, 2010"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
int j;
rxr->rx_buf_ring =
vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
if (rxr->rx_buf_ring == NULL)
return -ENOMEM;
memset(rxr->rx_buf_ring, 0,
SW_RXBD_RING_SIZE * bp->rx_max_ring);
for (j = 0; j < bp->rx_max_ring; j++) {
rxr->rx_desc_ring[j] =
dma_alloc_coherent(&bp->pdev->dev,
@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
}
if (bp->rx_pg_ring_size) {
rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
bp->rx_max_pg_ring);
if (rxr->rx_pg_ring == NULL)
return -ENOMEM;
memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
bp->rx_max_pg_ring);
}
for (j = 0; j < bp->rx_max_pg_ring; j++) {
@ -4645,13 +4640,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
/* Wait for the current PCI transaction to complete before
* issuing a reset. */
REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
udelay(5);
if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
(CHIP_NUM(bp) == CHIP_NUM_5708)) {
REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
udelay(5);
} else { /* 5709 */
val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
for (i = 0; i < 100; i++) {
msleep(1);
val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
break;
}
}
/* Wait for the firmware to tell us it is ok to issue a reset. */
bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
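[Editor's note: for the 5709 the reset path above polls a PCIe status bit with a ~100 ms bound before resetting. A standalone sketch of that bounded poll; read_reg() and sleep_ms() are stand-ins, and the mask mirrors (1L<<5)<<16, assumed to be a 'transactions pending' flag in the upper half of the config dword:]

#include <stdint.h>

#define TRANSACTIONS_PEND (1u << 21)	/* assumption: mirrors (1L<<5)<<16 */

static int wait_no_pending(uint32_t (*read_reg)(void),
			   void (*sleep_ms)(int))
{
	int i;

	for (i = 0; i < 100; i++) {
		sleep_ms(1);
		if (!(read_reg() & TRANSACTIONS_PEND))
			return 0;	/* quiesced */
	}
	return -1;			/* still pending after ~100 ms */
}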
@ -4673,7 +4683,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
} else {
val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@ -6086,7 +6096,7 @@ bnx2_request_irq(struct bnx2 *bp)
}
static void
bnx2_free_irq(struct bnx2 *bp)
__bnx2_free_irq(struct bnx2 *bp)
{
struct bnx2_irq *irq;
int i;
@ -6097,6 +6107,13 @@ bnx2_free_irq(struct bnx2 *bp)
free_irq(irq->vector, &bp->bnx2_napi[i]);
irq->requested = 0;
}
}
static void
bnx2_free_irq(struct bnx2 *bp)
{
__bnx2_free_irq(bp);
if (bp->flags & BNX2_FLAG_USING_MSI)
pci_disable_msi(bp->pdev);
else if (bp->flags & BNX2_FLAG_USING_MSIX)
@ -6801,28 +6818,30 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
u32 *p = _p, i, offset;
u8 *orig_p = _p;
struct bnx2 *bp = netdev_priv(dev);
u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
0x0800, 0x0880, 0x0c00, 0x0c10,
0x0c30, 0x0d08, 0x1000, 0x101c,
0x1040, 0x1048, 0x1080, 0x10a4,
0x1400, 0x1490, 0x1498, 0x14f0,
0x1500, 0x155c, 0x1580, 0x15dc,
0x1600, 0x1658, 0x1680, 0x16d8,
0x1800, 0x1820, 0x1840, 0x1854,
0x1880, 0x1894, 0x1900, 0x1984,
0x1c00, 0x1c0c, 0x1c40, 0x1c54,
0x1c80, 0x1c94, 0x1d00, 0x1d84,
0x2000, 0x2030, 0x23c0, 0x2400,
0x2800, 0x2820, 0x2830, 0x2850,
0x2b40, 0x2c10, 0x2fc0, 0x3058,
0x3c00, 0x3c94, 0x4000, 0x4010,
0x4080, 0x4090, 0x43c0, 0x4458,
0x4c00, 0x4c18, 0x4c40, 0x4c54,
0x4fc0, 0x5010, 0x53c0, 0x5444,
0x5c00, 0x5c18, 0x5c80, 0x5c90,
0x5fc0, 0x6000, 0x6400, 0x6428,
0x6800, 0x6848, 0x684c, 0x6860,
0x6888, 0x6910, 0x8000 };
static const u32 reg_boundaries[] = {
0x0000, 0x0098, 0x0400, 0x045c,
0x0800, 0x0880, 0x0c00, 0x0c10,
0x0c30, 0x0d08, 0x1000, 0x101c,
0x1040, 0x1048, 0x1080, 0x10a4,
0x1400, 0x1490, 0x1498, 0x14f0,
0x1500, 0x155c, 0x1580, 0x15dc,
0x1600, 0x1658, 0x1680, 0x16d8,
0x1800, 0x1820, 0x1840, 0x1854,
0x1880, 0x1894, 0x1900, 0x1984,
0x1c00, 0x1c0c, 0x1c40, 0x1c54,
0x1c80, 0x1c94, 0x1d00, 0x1d84,
0x2000, 0x2030, 0x23c0, 0x2400,
0x2800, 0x2820, 0x2830, 0x2850,
0x2b40, 0x2c10, 0x2fc0, 0x3058,
0x3c00, 0x3c94, 0x4000, 0x4010,
0x4080, 0x4090, 0x43c0, 0x4458,
0x4c00, 0x4c18, 0x4c40, 0x4c54,
0x4fc0, 0x5010, 0x53c0, 0x5444,
0x5c00, 0x5c18, 0x5c80, 0x5c90,
0x5fc0, 0x6000, 0x6400, 0x6428,
0x6800, 0x6848, 0x684c, 0x6860,
0x6888, 0x6910, 0x8000
};
regs->version = 0;
@ -7080,6 +7099,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
bnx2_netif_stop(bp, true);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
__bnx2_free_irq(bp);
bnx2_free_skbs(bp);
bnx2_free_mem(bp);
}
@ -7091,6 +7111,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
int rc;
rc = bnx2_alloc_mem(bp);
if (!rc)
rc = bnx2_request_irq(bp);
if (!rc)
rc = bnx2_init_nic(bp, 0);
@ -7914,15 +7937,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_release;
}
bnx2_set_power_state(bp, PCI_D0);
/* Configure byte swap and enable write to the reg_window registers.
* Rely on the CPU to do target byte swapping on big endian systems;
* the chip's target access swapping will not swap all accesses.
*/
pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
bnx2_set_power_state(bp, PCI_D0);
REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
@ -8383,8 +8406,6 @@ bnx2_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
flush_scheduled_work();
unregister_netdev(dev);
if (bp->mips_firmware)
@ -8421,7 +8442,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
flush_scheduled_work();
cancel_work_sync(&bp->reset_task);
bnx2_netif_stop(bp, true);
netif_device_detach(dev);
del_timer_sync(&bp->timer);

View file

@ -461,6 +461,8 @@ struct l2_fhdr {
#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
/*
* pci_reg definition

View file

@ -4,4 +4,4 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o
bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o

View file

@ -13,6 +13,8 @@
#ifndef BNX2X_H
#define BNX2X_H
#include <linux/netdevice.h>
#include <linux/types.h>
/* compilation time flags */
@ -20,15 +22,17 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
#define DRV_MODULE_VERSION "1.60.01-0"
#define DRV_MODULE_RELDATE "2010/11/12"
#define DRV_MODULE_VERSION "1.62.00-3"
#define DRV_MODULE_RELDATE "2010/12/21"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
#define BNX2X_NEW_NAPI
#if defined(CONFIG_DCB)
#define BCM_DCB
#endif
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "../cnic_if.h"
@ -48,6 +52,7 @@
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
/* error/debug prints */
@ -199,10 +204,25 @@ void bnx2x_panic_dump(struct bnx2x *bp);
/* EQ completions */
#define HC_SP_INDEX_EQ_CONS 7
/* FCoE L2 connection completions */
#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
/* iSCSI L2 */
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
/* Special clients parameters */
/* SB indices */
/* FCoE L2 */
#define BNX2X_FCOE_L2_RX_INDEX \
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
#define BNX2X_FCOE_L2_TX_INDEX \
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
/**
* CIDs and CLIDs:
* CLIDs below is a CLID for func 0, then the CLID for other
@ -215,12 +235,19 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#define BNX2X_ISCSI_ETH_CL_ID 17
#define BNX2X_ISCSI_ETH_CID 17
/* FCoE L2 */
#define BNX2X_FCOE_ETH_CL_ID 18
#define BNX2X_FCOE_ETH_CID 18
/** Additional rings budgeting */
#ifdef BCM_CNIC
#define CNIC_CONTEXT_USE 1
#define FCOE_CONTEXT_USE 1
#else
#define CNIC_CONTEXT_USE 0
#define FCOE_CONTEXT_USE 0
#endif /* BCM_CNIC */
#define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE)
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@ -401,6 +428,17 @@ struct bnx2x_fastpath {
};
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
#ifdef BCM_CNIC
/* FCoE L2 `fastpath' is right after the eth entries */
#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
#else
#define IS_FCOE_FP(fp) false
#define IS_FCOE_IDX(idx) false
#endif
/* MC hsi */
@ -669,8 +707,14 @@ struct bnx2x_port {
enum {
CAM_ETH_LINE = 0,
CAM_ISCSI_ETH_LINE,
CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
CAM_FIP_ETH_LINE,
CAM_FIP_MCAST_LINE,
CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
};
/* number of MACs per function in NIG memory - used for SI mode */
#define NIG_LLH_FUNC_MEM_SIZE 16
/* number of entries in NIG_REG_LLHX_FUNC_MEM */
#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
#define BNX2X_VF_ID_INVALID 0xFF
@ -710,6 +754,14 @@ enum {
*/
#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
/*
* The number of FP-SB allocated by the driver == max number of regular L2
* queues + 1 for the CNIC which also consumes an FP-SB
*/
#define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE)
#define NUM_IGU_SB_REQUIRED(cid_cnt) \
(FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
union cdu_context {
struct eth_context eth;
char pad[1024];
@ -722,7 +774,8 @@ union cdu_context {
#ifdef BCM_CNIC
#define CNIC_ISCSI_CID_MAX 256
#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX)
#define CNIC_FCOE_CID_MAX 2048
#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
#endif
@ -770,6 +823,8 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
/* pfc configuration for DCBX ramrod */
struct flow_control_configuration pfc_config;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@ -918,6 +973,10 @@ struct bnx2x {
#define DISABLE_MSI_FLAG 0x200
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define MF_FUNC_DIS 0x1000
#define FCOE_MACS_SET 0x2000
#define NO_FCOE_FLAG 0x4000
#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
@ -967,6 +1026,8 @@ struct bnx2x {
u16 mf_ov;
u8 mf_mode;
#define IS_MF(bp) (bp->mf_mode != 0)
#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
u8 wol;
@ -1010,6 +1071,7 @@ struct bnx2x {
#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
#define BNX2X_ACCEPT_BROADCAST 0x0010
#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
#define BNX2X_PROMISCUOUS_MODE 0x10000
u32 rx_mode;
@ -1062,7 +1124,8 @@ struct bnx2x {
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
struct mutex cnic_mutex;
u8 iscsi_mac[6];
u8 iscsi_mac[ETH_ALEN];
u8 fip_mac[ETH_ALEN];
#endif
int dmae_ready;
@ -1122,6 +1185,31 @@ struct bnx2x {
char fw_ver[32];
const struct firmware *firmware;
/* LLDP params */
struct bnx2x_config_lldp_params lldp_config_params;
/* DCB support on/off */
u16 dcb_state;
#define BNX2X_DCB_STATE_OFF 0
#define BNX2X_DCB_STATE_ON 1
/* DCBX engine mode */
int dcbx_enabled;
#define BNX2X_DCBX_ENABLED_OFF 0
#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
#define BNX2X_DCBX_ENABLED_INVALID (-1)
bool dcbx_mode_uset;
struct bnx2x_config_dcbx_params dcbx_config_params;
struct bnx2x_dcbx_port_params dcbx_port_params;
int dcb_version;
/* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
};
/**
@ -1152,10 +1240,17 @@ struct bnx2x {
#define RSS_IPV6_TCP_CAP 0x0008
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
/* ethtool statistics are displayed for all regular ethernet queues and the
* fcoe L2 queue if not disabled
*/
#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
(BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
#define RSS_IPV4_CAP_MASK \
TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@ -1248,6 +1343,7 @@ struct bnx2x_client_ramrod_params {
u16 cl_id;
u32 cid;
u8 poll;
#define CLIENT_IS_FCOE 0x01
#define CLIENT_IS_LEADING_RSS 0x02
u8 flags;
};
@ -1280,11 +1376,54 @@ struct bnx2x_func_init_params {
u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
};
#define for_each_queue(bp, var) \
for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
#define for_each_nondefault_queue(bp, var) \
for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
#define for_each_eth_queue(bp, var) \
for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
#define for_each_nondefault_eth_queue(bp, var) \
for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
#define for_each_napi_queue(bp, var) \
for (var = 0; \
var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
if (skip_queue(bp, var)) \
continue; \
else
#define for_each_queue(bp, var) \
for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
if (skip_queue(bp, var)) \
continue; \
else
#define for_each_rx_queue(bp, var) \
for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
if (skip_rx_queue(bp, var)) \
continue; \
else
#define for_each_tx_queue(bp, var) \
for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
if (skip_tx_queue(bp, var)) \
continue; \
else
#define for_each_nondefault_queue(bp, var) \
for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
if (skip_queue(bp, var)) \
continue; \
else
/* skip rx queue
* if FCOE l2 support is disabled and this is the fcoe L2 queue
*/
#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
/* skip tx queue
* if FCOE l2 support is disabled and this is the fcoe L2 queue
*/
#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
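[Editor's note: the for_each_* variants above hide skipped indices behind a trailing 'if (skip) continue; else', so the caller's loop body binds as the else branch. A minimal self-contained illustration with a hypothetical macro:]

/* Visits only even values of var; the dangling else makes
 * for_each_even(i, n) { ... } read like an ordinary for loop
 * while odd indices are silently skipped. */
#define for_each_even(var, n)			\
	for ((var) = 0; (var) < (n); (var)++)	\
		if ((var) & 1)			\
			continue;		\
		else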
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
@ -1329,7 +1468,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
x = pci_alloc_consistent(bp->pdev, size, y); \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x) \
memset(x, 0, size); \
} while (0)
@ -1337,7 +1476,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_ILT_FREE(x, y, size) \
do { \
if (x) { \
pci_free_consistent(bp->pdev, size, x, y); \
dma_free_coherent(&bp->pdev->dev, size, x, y); \
x = NULL; \
y = 0; \
} \
@ -1608,10 +1747,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
(T_ETH_MAC_COMMAND_INVALIDATE))
#define CAM_INVALIDATE(x) \
(x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
/* Number of u32 elements in MC hash array */
#define MC_HASH_SIZE 8
#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \

View file

@ -698,6 +698,29 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
mutex_unlock(&bp->port.phy_mutex);
}
/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
u16 line_speed = bp->link_vars.line_speed;
if (IS_MF(bp)) {
u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT;
/* Calculate the current MAX line speed limit for the DCC
* capable devices
*/
if (IS_MF_SD(bp)) {
u16 vn_max_rate = maxCfg * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
} else /* IS_MF_SI(bp)) */
line_speed = (line_speed * maxCfg) / 100;
}
return line_speed;
}
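[Editor's note: a standalone restatement of the calculation above with illustrative numbers; maxCfg is in units of 100 Mbps, SD mode caps the link speed, and SI mode scales it as a percentage:]

#include <stdio.h>

static unsigned short mf_speed(unsigned short line_speed,
			       unsigned short max_cfg, int is_sd)
{
	if (is_sd) {
		unsigned short vn_max_rate = max_cfg * 100;
		return vn_max_rate < line_speed ? vn_max_rate : line_speed;
	}
	return (line_speed * max_cfg) / 100;	/* SI: percentage of speed */
}

int main(void)
{
	printf("%u\n", mf_speed(10000, 25, 1));	/* SD: capped to 2500 */
	printf("%u\n", mf_speed(10000, 25, 0));	/* SI: scaled to 2500 */
	return 0;
}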
void bnx2x_link_report(struct bnx2x *bp)
{
if (bp->flags & MF_FUNC_DIS) {
@ -713,17 +736,8 @@ void bnx2x_link_report(struct bnx2x *bp)
netif_carrier_on(bp->dev);
netdev_info(bp->dev, "NIC Link is Up, ");
line_speed = bp->link_vars.line_speed;
if (IS_MF(bp)) {
u16 vn_max_rate;
line_speed = bnx2x_get_mf_speed(bp);
vn_max_rate =
((bp->mf_config[BP_VN(bp)] &
FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
}
pr_cont("%d Mbps ", line_speed);
if (bp->link_vars.duplex == DUPLEX_FULL)
@ -813,7 +827,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
for_each_queue(bp, j) {
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
if (!fp->disable_tpa) {
@ -866,7 +880,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
}
}
for_each_queue(bp, j) {
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
fp->rx_bd_cons = 0;
@ -897,7 +911,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
int i;
for_each_queue(bp, i) {
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
u16 bd_cons = fp->tx_bd_cons;
@ -915,7 +929,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
int i, j;
for_each_queue(bp, j) {
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 0; i < NUM_RX_BD; i++) {
@ -956,7 +970,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
for_each_queue(bp, i) {
for_each_eth_queue(bp, i) {
DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
"state %x\n", i, bp->msix_table[i + offset].vector,
bnx2x_fp(bp, i, state));
@ -990,14 +1004,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
for_each_queue(bp, i) {
for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
"(fastpath #%u)\n", msix_vec, msix_vec, i);
msix_vec++;
}
req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
@ -1053,7 +1067,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
#ifdef BCM_CNIC
offset++;
#endif
for_each_queue(bp, i) {
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
bp->dev->name, i);
@ -1070,7 +1084,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
fp->state = BNX2X_FP_STATE_IRQ;
}
i = BNX2X_NUM_QUEUES(bp);
i = BNX2X_NUM_ETH_QUEUES(bp);
offset = 1 + CNIC_CONTEXT_USE;
netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
" ... fp[%d] %d\n",
@ -1117,7 +1131,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
{
int i;
for_each_queue(bp, i)
for_each_napi_queue(bp, i)
napi_enable(&bnx2x_fp(bp, i, napi));
}
@ -1125,7 +1139,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
{
int i;
for_each_queue(bp, i)
for_each_napi_queue(bp, i)
napi_disable(&bnx2x_fp(bp, i, napi));
}
@ -1153,6 +1167,35 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
netif_tx_disable(bp->dev);
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef BCM_CNIC
struct bnx2x *bp = netdev_priv(dev);
if (NO_FCOE(bp))
return skb_tx_hash(dev, skb);
else {
struct ethhdr *hdr = (struct ethhdr *)skb->data;
u16 ether_type = ntohs(hdr->h_proto);
/* Skip VLAN tag if present */
if (ether_type == ETH_P_8021Q) {
struct vlan_ethhdr *vhdr =
(struct vlan_ethhdr *)skb->data;
ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
}
/* If ethertype is FCoE or FIP - use FCoE ring */
if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
return bnx2x_fcoe(bp, index);
}
#endif
/* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
*/
return __skb_tx_hash(dev, skb,
dev->real_num_tx_queues - FCOE_CONTEXT_USE);
}
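[Editor's note: the selector above peeks at the frame's ethertype, stepping over one VLAN tag if present, before deciding whether to use the FCoE ring. A self-contained sketch of that peek with plain stand-in structs; 0x8100 is ETH_P_8021Q:]

#include <stdint.h>
#include <arpa/inet.h>

struct eth_sketch  { uint8_t dst[6], src[6]; uint16_t proto; };
struct vlan_sketch { uint8_t dst[6], src[6]; uint16_t tpid, tci, proto; };

/* Return the host-order ethertype, skipping one 802.1Q tag if present,
 * as the FCoE/FIP test above does before choosing a ring. */
static uint16_t outer_ethertype(const void *frame)
{
	const struct eth_sketch *eth = frame;
	uint16_t type = ntohs(eth->proto);

	if (type == 0x8100)
		type = ntohs(((const struct vlan_sketch *)frame)->proto);
	return type;
}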
void bnx2x_set_num_queues(struct bnx2x *bp)
{
switch (bp->multi_mode) {
@ -1167,8 +1210,23 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
bp->num_queues = 1;
break;
}
/* Add special queues */
bp->num_queues += NONE_ETH_CONTEXT_USE;
}
#ifdef BCM_CNIC
static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
{
if (!NO_FCOE(bp)) {
if (!IS_MF_SD(bp))
bnx2x_set_fip_eth_mac_addr(bp, 1);
bnx2x_set_all_enode_macs(bp, 1);
bp->flags |= FCOE_MACS_SET;
}
}
#endif
static void bnx2x_release_firmware(struct bnx2x *bp)
{
kfree(bp->init_ops_offsets);
@ -1177,6 +1235,20 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
release_firmware(bp->firmware);
}
static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, num = bp->num_queues;
#ifdef BCM_CNIC
if (NO_FCOE(bp))
num -= FCOE_CONTEXT_USE;
#endif
netif_set_real_num_tx_queues(bp->dev, num);
rc = netif_set_real_num_rx_queues(bp->dev, num);
return rc;
}
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@ -1203,10 +1275,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
rc = bnx2x_set_real_num_queues(bp);
if (rc) {
BNX2X_ERR("Unable to update real_num_rx_queues\n");
BNX2X_ERR("Unable to set real_num_queues\n");
goto load_error0;
}
@ -1214,6 +1285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_fp(bp, i, disable_tpa) =
((bp->flags & TPA_ENABLE_FLAG) == 0);
#ifdef BCM_CNIC
/* We don't want TPA on FCoE L2 ring */
bnx2x_fcoe(bp, disable_tpa) = 1;
#endif
bnx2x_napi_enable(bp);
/* Send LOAD_REQUEST command to MCP
@ -1296,6 +1371,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
bnx2x_dcbx_init(bp);
bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
rc = bnx2x_func_start(bp);
@ -1344,6 +1421,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
#ifdef BCM_CNIC
bnx2x_set_fcoe_eth_macs(bp);
#endif
bnx2x_set_eth_mac(bp, 1);
if (bp->port.pmf)
@ -1402,7 +1483,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
/* Release IRQs */
@ -1473,7 +1554,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
for_each_queue(bp, i)
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
bnx2x_free_mem(bp);
@ -1577,6 +1658,17 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* Fall out from the NAPI loop if needed */
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
#ifdef BCM_CNIC
/* No need to update SB for FCoE L2 ring as long as
* it's connected to the default SB and the SB
* has been updated when NAPI was scheduled.
*/
if (IS_FCOE_FP(fp)) {
napi_complete(napi);
break;
}
#endif
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
* thus we need to ensure that status block indices
@ -1692,11 +1784,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
}
}
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
if (skb_is_gso_v6(skb))
rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
else if (skb_is_gso(skb))
rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
return rc;
}
@ -2242,7 +2333,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* msix table */
tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
GFP_KERNEL);
if (!tbl)
goto alloc_err;

View file

@ -72,6 +72,16 @@ void bnx2x__link_status_update(struct bnx2x *bp);
*/
void bnx2x_link_report(struct bnx2x *bp);
/**
* calculates MF speed according to current linespeed and MF
* configuration
*
* @param bp
*
* @return u16
*/
u16 bnx2x_get_mf_speed(struct bnx2x *bp);
/**
* MSI-X slowpath interrupt handler
*
@ -232,6 +242,30 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
#ifdef BCM_CNIC
/**
* Set/Clear FIP MAC(s) at the next entries in the CAM after the ETH
* MAC(s). This function will wait until the ramrod completion
* returns.
*
* @param bp driver handle
* @param set set or clear the CAM entry
*
* @return 0 if success, -ENODEV if ramrod doesn't return.
*/
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
/**
* Set/Clear ALL_ENODE mcast MAC.
*
* @param bp
* @param set
*
* @return int
*/
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
#endif
/**
* Set MAC filtering configurations.
*
@ -289,6 +323,13 @@ int bnx2x_func_start(struct bnx2x *bp);
*/
void bnx2x_ilt_set_info(struct bnx2x *bp);
/**
* Initialize the dcbx protocol
*
* @param bp
*/
void bnx2x_dcbx_init(struct bnx2x *bp);
/**
* Set power state to the requested value. Currently only D0 and
* D3hot are supported.
@ -309,6 +350,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
@ -685,7 +729,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
int i;
/* Add NAPI objects */
for_each_queue(bp, i)
for_each_napi_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@ -694,7 +738,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
for_each_queue(bp, i)
for_each_napi_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
@ -860,7 +904,7 @@ static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
int i, j;
for_each_queue(bp, j) {
for_each_tx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
for (i = 1; i <= NUM_TX_RINGS; i++) {
@ -939,7 +983,30 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
}
}
#ifdef BCM_CNIC
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
bnx2x_fcoe(bp, bp) = bp;
bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
bnx2x_fcoe(bp, index) = FCOE_IDX;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
/* qZone id equals to FW (per path) client id */
bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
ETH_MAX_RX_CLIENTS_E1H);
/* init shortcut */
bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
}
#endif
static inline void __storm_memset_struct(struct bnx2x *bp,
u32 addr, size_t size, u32 *data)

File diff suppressed because it is too large

View file

@ -0,0 +1,196 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
* Copyright 2009-2010 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available
* at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*
* Maintained by: Eilon Greenstein <eilong@broadcom.com>
* Written by: Dmitry Kravkov
*
*/
#ifndef BNX2X_DCB_H
#define BNX2X_DCB_H
#include "bnx2x_hsi.h"
#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
struct bnx2x_dcbx_app_params {
u32 enabled;
u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
};
#define E2_NUM_OF_COS 2
#define BNX2X_DCBX_COS_NOT_STRICT 0
#define BNX2X_DCBX_COS_LOW_STRICT 1
#define BNX2X_DCBX_COS_HIGH_STRICT 2
struct bnx2x_dcbx_cos_params {
u32 bw_tbl;
u32 pri_bitmask;
u8 strict;
u8 pauseable;
};
struct bnx2x_dcbx_pg_params {
u32 enabled;
u8 num_of_cos; /* valid COS entries */
struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS];
};
struct bnx2x_dcbx_pfc_params {
u32 enabled;
u32 priority_non_pauseable_mask;
};
struct bnx2x_dcbx_port_params {
struct bnx2x_dcbx_pfc_params pfc;
struct bnx2x_dcbx_pg_params ets;
struct bnx2x_dcbx_app_params app;
};
#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
/*******************************************************************************
* LLDP protocol configuration parameters.
******************************************************************************/
struct bnx2x_config_lldp_params {
u32 overwrite_settings;
u32 msg_tx_hold;
u32 msg_fast_tx;
u32 tx_credit_max;
u32 msg_tx_interval;
u32 tx_fast;
};
struct bnx2x_admin_priority_app_table {
u32 valid;
u32 priority;
#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
u32 traffic_type;
#define TRAFFIC_TYPE_ETH 0
#define TRAFFIC_TYPE_PORT 1
u32 app_id;
};
/*******************************************************************************
* DCBX protocol configuration parameters.
******************************************************************************/
struct bnx2x_config_dcbx_params {
u32 overwrite_settings;
u32 admin_dcbx_version;
u32 admin_ets_enable;
u32 admin_pfc_enable;
u32 admin_tc_supported_tx_enable;
u32 admin_ets_configuration_tx_enable;
u32 admin_ets_recommendation_tx_enable;
u32 admin_pfc_tx_enable;
u32 admin_application_priority_tx_enable;
u32 admin_ets_willing;
u32 admin_ets_reco_valid;
u32 admin_pfc_willing;
u32 admin_app_priority_willing;
u32 admin_configuration_bw_precentage[8];
u32 admin_configuration_ets_pg[8];
u32 admin_recommendation_bw_precentage[8];
u32 admin_recommendation_ets_pg[8];
u32 admin_pfc_bitmap;
struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
u32 admin_default_priority;
};
#define GET_FLAGS(flags, bits) ((flags) & (bits))
#define SET_FLAGS(flags, bits) ((flags) |= (bits))
#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
enum {
DCBX_READ_LOCAL_MIB,
DCBX_READ_REMOTE_MIB
};
#define ETH_TYPE_FCOE (0x8906)
#define TCP_PORT_ISCSI (0xCBC)
#define PFC_VALUE_FRAME_SIZE (512)
#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
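/* Worked example of the quantum arithmetic above (illustrative): with
* PFC_VALUE_FRAME_SIZE = 512 (bit times per pause quantum), a 10G link gives
* PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(10000) = (1000 * 512) / 10000 = 51,
* i.e. ~51 ns per quantum (integer division truncates the exact 51.2).
*/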
#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
struct cos_entry_help_data {
u32 pri_join_mask;
u32 cos_bw;
u8 strict;
bool pausable;
};
struct cos_help_data {
struct cos_entry_help_data data[E2_NUM_OF_COS];
u8 num_of_cos;
};
#define DCBX_ILLEGAL_PG (0xFF)
#define DCBX_PFC_PRI_MASK (0xFF)
#define DCBX_STRICT_PRIORITY (15)
#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
struct pg_entry_help_data {
u8 num_of_dif_pri;
u8 pg;
u32 pg_priority;
};
struct pg_help_data {
struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
u8 num_of_pg;
};
/* forward DCB/PFC related declarations */
struct bnx2x;
void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp);
void bnx2x_dcbx_update(struct work_struct *work);
void bnx2x_dcbx_init_params(struct bnx2x *bp);
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
enum {
BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
BNX2X_DCBX_STATE_TX_PAUSED = 0x2,
BNX2X_DCBX_STATE_TX_RELEASED = 0x4
};
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
/* DCB netlink */
#ifdef BCM_DCB
extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
#endif /* BCM_DCB */
#endif /* BNX2X_DCB_H */

View file

@ -25,6 +25,143 @@
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
/* Note: in the format strings below %s is replaced by the queue-name which is
* either its index or 'fcoe' for the fcoe queue. Make sure the format string
* length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
*/
#define MAX_QUEUE_NAME_LEN 4
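/* Illustrative examples of the resulting ethtool strings: queue 0 yields
* "[0]: rx_bytes" while the FCoE queue yields "[fcoe]: rx_bytes" - "fcoe"
* being exactly MAX_QUEUE_NAME_LEN characters long.
*/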
static const struct {
long offset;
int size;
char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
{ Q_STATS_OFFSET32(error_bytes_received_hi),
8, "[%s]: rx_error_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
8, "[%s]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
8, "[%s]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
8, "[%s]: rx_bcast_packets" },
{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
4, "[%s]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
4, "[%s]: rx_skb_alloc_discard" },
{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, "[%s]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8, "[%s]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8, "[%s]: tx_bcast_packets" }
};
#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
static const struct {
long offset;
int size;
u32 flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
char string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[] = {
/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
8, STATS_FLAGS_BOTH, "rx_bytes" },
{ STATS_OFFSET32(error_bytes_received_hi),
8, STATS_FLAGS_BOTH, "rx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_received_hi),
8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
{ STATS_OFFSET32(total_multicast_packets_received_hi),
8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
{ STATS_OFFSET32(total_broadcast_packets_received_hi),
8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8, STATS_FLAGS_PORT, "rx_crc_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8, STATS_FLAGS_PORT, "rx_align_errors" },
{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8, STATS_FLAGS_PORT, "rx_undersize_packets" },
{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8, STATS_FLAGS_PORT, "rx_fragments" },
{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8, STATS_FLAGS_PORT, "rx_jabbers" },
{ STATS_OFFSET32(no_buff_discard_hi),
8, STATS_FLAGS_BOTH, "rx_discards" },
{ STATS_OFFSET32(mac_filter_discard),
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(xxoverflow_discard),
4, STATS_FLAGS_PORT, "rx_fw_discards" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
8, STATS_FLAGS_PORT, "rx_brb_truncate" },
{ STATS_OFFSET32(pause_frames_received_hi),
8, STATS_FLAGS_PORT, "rx_pause_frames" },
{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
{ STATS_OFFSET32(nig_timer_max),
4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
{ STATS_OFFSET32(rx_skb_alloc_failed),
4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
{ STATS_OFFSET32(hw_csum_err),
4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
{ STATS_OFFSET32(total_bytes_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_bytes" },
{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8, STATS_FLAGS_PORT, "tx_mac_errors" },
{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_multi_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8, STATS_FLAGS_PORT, "tx_excess_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8, STATS_FLAGS_PORT, "tx_late_collisions" },
{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8, STATS_FLAGS_PORT, "tx_total_collisions" },
{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
@ -45,14 +182,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = bp->link_params.req_line_speed[cfg_idx];
cmd->duplex = bp->link_params.req_duplex[cfg_idx];
}
if (IS_MF(bp)) {
u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
100;
if (vn_max_rate < cmd->speed)
cmd->speed = vn_max_rate;
}
if (IS_MF(bp))
cmd->speed = bnx2x_get_mf_speed(bp);
if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
@ -87,18 +219,57 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
u32 speed;
if (IS_MF(bp))
if (IS_MF_SD(bp))
return 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
" supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
" duplex %d port %d phy_address %d transceiver %d\n"
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
cmd->speed_hi,
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
speed = cmd->speed;
speed |= (cmd->speed_hi << 16);
if (IS_MF_SI(bp)) {
u32 param = 0;
u32 line_speed = bp->link_vars.line_speed;
/* use 10G if no link detected */
if (!line_speed)
line_speed = 10000;
if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
BNX2X_DEV_INFO("To set speed BC %X or higher "
"is required, please upgrade BC\n",
REQ_BC_VER_4_SET_MF_BW);
return -EINVAL;
}
if (line_speed < speed) {
BNX2X_DEV_INFO("New speed should be less or equal "
"to actual line speed\n");
return -EINVAL;
}
/* load old values */
param = bp->mf_config[BP_VN(bp)];
/* leave only MIN value */
param &= FUNC_MF_CFG_MIN_BW_MASK;
/* set new MAX value */
param |= (((speed * 100) / line_speed)
<< FUNC_MF_CFG_MAX_BW_SHIFT)
& FUNC_MF_CFG_MAX_BW_MASK;
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
return 0;
}
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
switch (cmd->port) {
@ -168,8 +339,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else { /* forced speed */
/* advertise the requested speed and duplex if supported */
u32 speed = cmd->speed;
speed |= (cmd->speed_hi << 16);
switch (speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL) {
@ -1286,7 +1455,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
save_val = REG_RD(bp, offset);
REG_WR(bp, offset, (wr_val & mask));
REG_WR(bp, offset, wr_val & mask);
val = REG_RD(bp, offset);
@ -1499,8 +1668,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
* updates that have been performed while interrupts were
* disabled.
*/
if (bp->common.int_block == INT_BLOCK_IGU)
if (bp->common.int_block == INT_BLOCK_IGU) {
/* Disable local BHes to prevent a dead-lock situation between
* sch_direct_xmit() and bnx2x_run_loopback() (calling
* bnx2x_tx_int()), as both are taking netif_tx_lock().
*/
local_bh_disable();
bnx2x_tx_int(fp_tx);
local_bh_enable();
}
rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
if (rx_idx != rx_start_idx + num_pkts)
@ -1650,7 +1826,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
config->hdr.client_id = bp->fp->cl_id;
config->hdr.reserved1 = 0;
bp->set_mac_pending++;
bp->set_mac_pending = 1;
smp_wmb();
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@ -1748,134 +1924,6 @@ static void bnx2x_self_test(struct net_device *dev,
#endif
}
static const struct {
long offset;
int size;
u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
{ Q_STATS_OFFSET32(error_bytes_received_hi),
8, "[%d]: rx_error_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
8, "[%d]: rx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
8, "[%d]: rx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
8, "[%d]: rx_bcast_packets" },
{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
{ Q_STATS_OFFSET32(rx_err_discard_pkt),
4, "[%d]: rx_phy_ip_err_discards"},
{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
4, "[%d]: rx_skb_alloc_discard" },
{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, "[%d]: tx_ucast_packets" },
{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8, "[%d]: tx_mcast_packets" },
{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8, "[%d]: tx_bcast_packets" }
};
static const struct {
long offset;
int size;
u32 flags;
#define STATS_FLAGS_PORT 1
#define STATS_FLAGS_FUNC 2
#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
8, STATS_FLAGS_BOTH, "rx_bytes" },
{ STATS_OFFSET32(error_bytes_received_hi),
8, STATS_FLAGS_BOTH, "rx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_received_hi),
8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
{ STATS_OFFSET32(total_multicast_packets_received_hi),
8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
{ STATS_OFFSET32(total_broadcast_packets_received_hi),
8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
8, STATS_FLAGS_PORT, "rx_crc_errors" },
{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
8, STATS_FLAGS_PORT, "rx_align_errors" },
{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
8, STATS_FLAGS_PORT, "rx_undersize_packets" },
{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
8, STATS_FLAGS_PORT, "rx_fragments" },
{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
8, STATS_FLAGS_PORT, "rx_jabbers" },
{ STATS_OFFSET32(no_buff_discard_hi),
8, STATS_FLAGS_BOTH, "rx_discards" },
{ STATS_OFFSET32(mac_filter_discard),
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(xxoverflow_discard),
4, STATS_FLAGS_PORT, "rx_fw_discards" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
8, STATS_FLAGS_PORT, "rx_brb_truncate" },
{ STATS_OFFSET32(pause_frames_received_hi),
8, STATS_FLAGS_PORT, "rx_pause_frames" },
{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
{ STATS_OFFSET32(nig_timer_max),
4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
{ STATS_OFFSET32(rx_skb_alloc_failed),
4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
{ STATS_OFFSET32(hw_csum_err),
4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
{ STATS_OFFSET32(total_bytes_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_bytes" },
{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
8, STATS_FLAGS_PORT, "tx_error_bytes" },
{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
8, STATS_FLAGS_PORT, "tx_mac_errors" },
{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_single_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
8, STATS_FLAGS_PORT, "tx_multi_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
8, STATS_FLAGS_PORT, "tx_deferred" },
{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
8, STATS_FLAGS_PORT, "tx_excess_collisions" },
{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
8, STATS_FLAGS_PORT, "tx_late_collisions" },
{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
8, STATS_FLAGS_PORT, "tx_total_collisions" },
{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
{ STATS_OFFSET32(pause_frames_sent_hi),
8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
#define IS_PORT_STAT(i) \
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
@ -1890,7 +1938,8 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
BNX2X_NUM_Q_STATS;
if (!IS_MF_MODE_STAT(bp))
num_stats += BNX2X_NUM_STATS;
} else {
@ -1916,15 +1965,25 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
int i, j, k;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
case ETH_SS_STATS:
if (is_multi(bp)) {
k = 0;
for_each_queue(bp, i) {
for_each_napi_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
if (IS_FCOE_IDX(i))
sprintf(queue_name, "fcoe");
else
sprintf(queue_name, "%d", i);
for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
sprintf(buf + (k + j)*ETH_GSTRING_LEN,
bnx2x_q_stats_arr[j].string, i);
snprintf(buf + (k + j)*ETH_GSTRING_LEN,
ETH_GSTRING_LEN,
bnx2x_q_stats_arr[j].string,
queue_name);
k += BNX2X_NUM_Q_STATS;
}
if (IS_MF_MODE_STAT(bp))
@ -1958,7 +2017,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
k = 0;
for_each_queue(bp, i) {
for_each_napi_queue(bp, i) {
hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {

View file

@ -434,7 +434,12 @@ struct shared_feat_cfg { /* NVRAM Offset */
#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100
#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
};
@ -679,7 +684,7 @@ struct shm_dev_info { /* size */
#define E1VN_MAX 1
#define E1HVN_MAX 4
#define E2_VF_MAX 64
/* This value (in milliseconds) determines the frequency of the driver
* issuing the PULSE message code. The firmware monitors this periodic
* pulse to determine when to switch to an OS-absent mode. */
@ -815,6 +820,11 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
@ -888,6 +898,7 @@ struct drv_func_mb {
u32 drv_status;
#define DRV_STATUS_PMF 0x00000001
#define DRV_STATUS_SET_MF_BW 0x00000004
#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
@ -896,6 +907,8 @@ struct drv_func_mb {
#define DRV_STATUS_DCC_RESERVED1 0x00000800
#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
@ -988,12 +1001,43 @@ struct func_mf_cfg {
};
/* This structure is not applicable and should not be accessed on 57711 */
struct func_ext_cfg {
u32 func_cfg;
#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
#define MACP_FUNC_CFG_FLAGS_SHIFT 0
#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
u32 iscsi_mac_addr_upper;
u32 iscsi_mac_addr_lower;
u32 fcoe_mac_addr_upper;
u32 fcoe_mac_addr_lower;
u32 fcoe_wwn_port_name_upper;
u32 fcoe_wwn_port_name_lower;
u32 fcoe_wwn_node_name_upper;
u32 fcoe_wwn_node_name_lower;
u32 preserve_data;
#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
};
struct mf_cfg {
struct shared_mf_cfg shared_mf_config;
struct port_mf_cfg port_mf_config[PORT_MAX];
struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
};
@ -1049,6 +1093,251 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
/**** SUPPORT FOR SHMEM ARRAYS ****
* The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
* define arrays with storage types smaller than unsigned dwords.
* The macros below add generic support for SHMEM arrays with numeric elements
* that can span 2, 4, 8 or 16 bits. The array underlying type is a 32 bit
* dword array with individual bit-field elements accessed using shifts and
* masks.
*
*/
/* eb is the bitwidth of a single element */
#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
/* the bit-position macro allows the user to flip the order of the array's
* elements on a per byte or word boundary.
*
* example: an array with 8 entries each 4 bit wide. This array will fit into
* a single dword. The diagrams below show the array order of the nibbles.
*
* SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
*
* | | | |
* 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* | | | |
*
* SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
*
* | | | |
* 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
* | | | |
*
* SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
*
* | | | |
* 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
* | | | |
*/
#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
(((i)%((fb)/(eb))) * (eb)))
#define SHMEM_ARRAY_GET(a, i, eb, fb) \
((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
SHMEM_ARRAY_MASK(eb))
#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
do { \
a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
SHMEM_ARRAY_BITPOS(i, eb, fb)); \
a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
SHMEM_ARRAY_BITPOS(i, eb, fb)); \
} while (0)
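/* A minimal userspace sketch (illustrative, not part of the driver)
* exercising the SHMEM array accessors above. It assumes the SHMEM_ARRAY_*
* macros are copied verbatim from this header and checks the flip-per-byte
* ordering described in the comment: with eb = 4 and fb = 8, element 0
* lands in the low nibble of the most significant byte.
*/
#include <assert.h>
#include <stdint.h>
typedef uint32_t u32;
int main(void)
{
	u32 a[1] = { 0 };
	SHMEM_ARRAY_SET(a, 0, 4, 8, 0xA);	/* element 0 -> bits 27:24 */
	SHMEM_ARRAY_SET(a, 1, 4, 8, 0xB);	/* element 1 -> bits 31:28 */
	assert(SHMEM_ARRAY_GET(a, 0, 4, 8) == 0xA);
	assert(SHMEM_ARRAY_GET(a, 1, 4, 8) == 0xB);
	assert(a[0] == 0xBA000000);	/* flip within the top byte */
	return 0;
}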
/****START OF DCBX STRUCTURES DECLARATIONS****/
#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
#define DCBX_PRI_PG_BITWIDTH 4
#define DCBX_PRI_PG_FBITS 8
#define DCBX_PRI_PG_GET(a, i) \
SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
#define DCBX_PRI_PG_SET(a, i, val) \
SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
#define DCBX_BW_PG_BITWIDTH 8
#define DCBX_PG_BW_GET(a, i) \
SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
#define DCBX_PG_BW_SET(a, i, val) \
SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
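/* Illustrative: with 8-bit elements and fb == eb there is no intra-group
* flip, but elements are still laid out MSB-first - DCBX_PG_BW_GET(a, 0)
* reads bits 31:24 of a[0] and DCBX_PG_BW_GET(a, 3) reads bits 7:0.
*/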
#define DCBX_STRICT_PRI_PG 15
#define DCBX_MAX_APP_PROTOCOL 16
#define FCOE_APP_IDX 0
#define ISCSI_APP_IDX 1
#define PREDEFINED_APP_IDX_MAX 2
struct dcbx_ets_feature {
u32 enabled;
u32 pg_bw_tbl[2];
u32 pri_pg_tbl[1];
};
struct dcbx_pfc_feature {
#ifdef __BIG_ENDIAN
u8 pri_en_bitmap;
#define DCBX_PFC_PRI_0 0x01
#define DCBX_PFC_PRI_1 0x02
#define DCBX_PFC_PRI_2 0x04
#define DCBX_PFC_PRI_3 0x08
#define DCBX_PFC_PRI_4 0x10
#define DCBX_PFC_PRI_5 0x20
#define DCBX_PFC_PRI_6 0x40
#define DCBX_PFC_PRI_7 0x80
u8 pfc_caps;
u8 reserved;
u8 enabled;
#elif defined(__LITTLE_ENDIAN)
u8 enabled;
u8 reserved;
u8 pfc_caps;
u8 pri_en_bitmap;
#define DCBX_PFC_PRI_0 0x01
#define DCBX_PFC_PRI_1 0x02
#define DCBX_PFC_PRI_2 0x04
#define DCBX_PFC_PRI_3 0x08
#define DCBX_PFC_PRI_4 0x10
#define DCBX_PFC_PRI_5 0x20
#define DCBX_PFC_PRI_6 0x40
#define DCBX_PFC_PRI_7 0x80
#endif
};
struct dcbx_app_priority_entry {
#ifdef __BIG_ENDIAN
u16 app_id;
u8 pri_bitmap;
u8 appBitfield;
#define DCBX_APP_ENTRY_VALID 0x01
#define DCBX_APP_ENTRY_SF_MASK 0x30
#define DCBX_APP_ENTRY_SF_SHIFT 4
#define DCBX_APP_SF_ETH_TYPE 0x10
#define DCBX_APP_SF_PORT 0x20
#elif defined(__LITTLE_ENDIAN)
u8 appBitfield;
#define DCBX_APP_ENTRY_VALID 0x01
#define DCBX_APP_ENTRY_SF_MASK 0x30
#define DCBX_APP_ENTRY_SF_SHIFT 4
#define DCBX_APP_SF_ETH_TYPE 0x10
#define DCBX_APP_SF_PORT 0x20
u8 pri_bitmap;
u16 app_id;
#endif
};
struct dcbx_app_priority_feature {
#ifdef __BIG_ENDIAN
u8 reserved;
u8 default_pri;
u8 tc_supported;
u8 enabled;
#elif defined(__LITTLE_ENDIAN)
u8 enabled;
u8 tc_supported;
u8 default_pri;
u8 reserved;
#endif
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
struct dcbx_features {
struct dcbx_ets_feature ets;
struct dcbx_pfc_feature pfc;
struct dcbx_app_priority_feature app;
};
struct lldp_params {
#ifdef __BIG_ENDIAN
u8 msg_fast_tx_interval;
u8 msg_tx_hold;
u8 msg_tx_interval;
u8 admin_status;
#define LLDP_TX_ONLY 0x01
#define LLDP_RX_ONLY 0x02
#define LLDP_TX_RX 0x03
#define LLDP_DISABLED 0x04
u8 reserved1;
u8 tx_fast;
u8 tx_crd_max;
u8 tx_crd;
#elif defined(__LITTLE_ENDIAN)
u8 admin_status;
#define LLDP_TX_ONLY 0x01
#define LLDP_RX_ONLY 0x02
#define LLDP_TX_RX 0x03
#define LLDP_DISABLED 0x04
u8 msg_tx_interval;
u8 msg_tx_hold;
u8 msg_fast_tx_interval;
u8 tx_crd;
u8 tx_crd_max;
u8 tx_fast;
u8 reserved1;
#endif
#define REM_CHASSIS_ID_STAT_LEN 4
#define REM_PORT_ID_STAT_LEN 4
u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
u32 peer_port_id[REM_PORT_ID_STAT_LEN];
};
struct lldp_dcbx_stat {
#define LOCAL_CHASSIS_ID_STAT_LEN 2
#define LOCAL_PORT_ID_STAT_LEN 2
u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
u32 num_tx_dcbx_pkts;
u32 num_rx_dcbx_pkts;
};
struct lldp_admin_mib {
u32 ver_cfg_flags;
#define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
#define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
#define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
#define DCBX_ETS_RECO_TX_ENABLED 0x00000008
#define DCBX_ETS_RECO_VALID 0x00000010
#define DCBX_ETS_WILLING 0x00000020
#define DCBX_PFC_WILLING 0x00000040
#define DCBX_APP_WILLING 0x00000080
#define DCBX_VERSION_CEE 0x00000100
#define DCBX_VERSION_IEEE 0x00000200
#define DCBX_DCBX_ENABLED 0x00000400
#define DCBX_CEE_VERSION_MASK 0x0000f000
#define DCBX_CEE_VERSION_SHIFT 12
#define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
#define DCBX_CEE_MAX_VERSION_SHIFT 16
struct dcbx_features features;
};
struct lldp_remote_mib {
u32 prefix_seq_num;
u32 flags;
#define DCBX_ETS_TLV_RX 0x00000001
#define DCBX_PFC_TLV_RX 0x00000002
#define DCBX_APP_TLV_RX 0x00000004
#define DCBX_ETS_RX_ERROR 0x00000010
#define DCBX_PFC_RX_ERROR 0x00000020
#define DCBX_APP_RX_ERROR 0x00000040
#define DCBX_ETS_REM_WILLING 0x00000100
#define DCBX_PFC_REM_WILLING 0x00000200
#define DCBX_APP_REM_WILLING 0x00000400
#define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
struct dcbx_features features;
u32 suffix_seq_num;
};
struct lldp_local_mib {
u32 prefix_seq_num;
u32 error;
#define DCBX_LOCAL_ETS_ERROR 0x00000001
#define DCBX_LOCAL_PFC_ERROR 0x00000002
#define DCBX_LOCAL_APP_ERROR 0x00000004
#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
#define DCBX_LOCAL_APP_MISMATCH 0x00000020
struct dcbx_features features;
u32 suffix_seq_num;
};
/***END OF DCBX STRUCTURES DECLARATIONS***/
struct shmem2_region {
@ -1072,7 +1361,12 @@ struct shmem2_region {
#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
struct fw_flr_mb flr_mb;
u32 reserved[3];
u32 dcbx_lldp_params_offset;
#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
u32 dcbx_neg_res_offset;
#define SHMEM_DCBX_NEG_RES_NONE 0x00000000
u32 dcbx_remote_mib_offset;
#define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
/*
* The other shmemX_base_addr holds the other path's shmem address
* required for example in case of common phy init, or for path1 to know
@ -1081,6 +1375,10 @@ struct shmem2_region {
*/
u32 other_shmem_base_addr;
u32 other_shmem2_base_addr;
u32 reserved1[E2_VF_MAX / 32];
u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
u32 dcbx_lldp_dcbx_stat_offset;
#define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
};
@ -1534,8 +1832,8 @@ struct host_func_stats {
#define BCM_5710_FW_MAJOR_VERSION 6
#define BCM_5710_FW_MINOR_VERSION 0
#define BCM_5710_FW_REVISION_VERSION 34
#define BCM_5710_FW_MINOR_VERSION 2
#define BCM_5710_FW_REVISION_VERSION 5
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@ -2982,6 +3280,25 @@ struct fairness_vars_per_vn {
};
/*
* The data for flow control configuration
*/
struct flow_control_configuration {
struct priority_cos
traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
#if defined(__BIG_ENDIAN)
u16 reserved1;
u8 dcb_version;
u8 dcb_enabled;
#elif defined(__LITTLE_ENDIAN)
u8 dcb_enabled;
u8 dcb_version;
u16 reserved1;
#endif
u32 reserved2;
};
/*
* FW version stored in the Xstorm RAM
*/

View file

@ -164,7 +164,8 @@
#define EDC_MODE_PASSIVE_DAC 0x0055
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
/**********************************************************/
/* INTERFACE */
/**********************************************************/
@ -205,6 +206,270 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
return val;
}
/******************************************************************/
/* ETS section */
/******************************************************************/
void bnx2x_ets_disabled(struct link_params *params)
{
/* ETS disabled configuration */
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
/**
* mapping between entry priority and client number (0, 1, 2 - debug and
* management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
* 3bits client num.
* PRI4 | PRI3 | PRI2 | PRI1 | PRI0
* cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
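/* Sanity check of the 0x4688 constant above (illustrative): five 3-bit
* client fields packed from bit 0, (0<<0)|(1<<3)|(2<<6)|(3<<9)|(4<<12) =
* 0x8 + 0x80 + 0x600 + 0x4000 = 0x4688, matching the mapping comment.
*/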
/**
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries, 3 -
* COS0 entry, 4 - COS1 entry.
* COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
* bit4 bit3 bit2 bit1 bit0
* MCP and debug are strict
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* defines which entries (clients) are subjected to WFQ arbitration */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
/**
* For strict priority entries defines the number of consecutive
* slots for the highest priority.
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
/**
* mapping between the CREDIT_WEIGHT registers and actual client
* numbers
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
/* ETS mode disable */
REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
/**
* If ETS mode is enabled (there is no strict priority) defines a WFQ
* weight for COS0/COS1.
*/
REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}
void bnx2x_ets_bw_limit_common(const struct link_params *params)
{
/* ETS enabled BW limit configuration */
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
/**
* defines which entries (clients) are subjected to WFQ arbitration
* COS0 0x8
* COS1 0x10
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
/**
* mapping between the ARB_CREDIT_WEIGHT registers and actual
* client numbers (WEIGHT_0 does not actually have to represent
* client 0)
* PRI4 | PRI3 | PRI2 | PRI1 | PRI0
* cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
/* ETS mode enabled*/
REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
/**
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
* entry, 4 - COS1 entry.
* COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
* bit4 bit3 bit2 bit1 bit0
* MCP and debug are strict
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
}
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
const u32 cos1_bw)
{
/* ETS enabled BW limit configuration */
struct bnx2x *bp = params->bp;
const u32 total_bw = cos0_bw + cos1_bw;
u32 cos0_credit_weight = 0;
u32 cos1_credit_weight = 0;
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
if ((0 == total_bw) ||
(0 == cos0_bw) ||
(0 == cos1_bw)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_bw_limit: Total BW can't be zero\n");
return;
}
cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
total_bw;
cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
total_bw;
bnx2x_ets_bw_limit_common(params);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
}
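/* Worked example of the weight arithmetic above (illustrative): for a
* 30/70 split, total_bw = 100, so cos0_credit_weight =
* (30 * 0x5000) / 100 = 0x1800 and cos1_credit_weight =
* (70 * 0x5000) / 100 = 0x3800 - the ETS_BW_LIMIT_CREDIT_WEIGHT constant
* is scaled proportionally to the requested bandwidths.
*/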
u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
{
/* ETS enabled strict configuration */
struct bnx2x *bp = params->bp;
u32 val = 0;
DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
/**
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries,
* 3 - COS0 entry, 4 - COS1 entry.
* COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
* bit4 bit3 bit2 bit1 bit0
* MCP and debug are strict
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
/**
* For strict priority entries defines the number of consecutive slots
* for the highest priority.
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
/* ETS mode disable */
REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
/* Defines which COS is the high (strict) priority COS */
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
/**
* mapping between entry priority and client number (0, 1, 2 - debug and
* management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
* 3bits client num.
* PRI4 | PRI3 | PRI2 | PRI1 | PRI0
* dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
* dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
*/
val = (0 == strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
return 0;
}
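/* Illustrative decode of the constants above: 0x2318 packs the 3-bit
* client fields (PRI0..PRI4 from bit 0) as MCP=000, cos0=011, cos1=100,
* dbg1=001, dbg0=010, matching the first mapping row; 0x22E0 swaps the
* cos0 and cos1 positions, matching the second row.
*/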
/******************************************************************/
/* PFC section */
/******************************************************************/
static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2])
{
/* Read pfc statistic */
struct bnx2x *bp = params->bp;
u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n");
REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP,
pfc_frames_sent, 2);
REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP,
pfc_frames_received, 2);
}
static void bnx2x_emac_get_pfc_stat(struct link_params *params,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2])
{
/* Read pfc statistic */
struct bnx2x *bp = params->bp;
u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u32 val_xon = 0;
u32 val_xoff = 0;
DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
/* PFC received frames */
val_xoff = REG_RD(bp, emac_base +
EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
pfc_frames_received[0] = val_xon + val_xoff;
/* PFC frames sent */
val_xoff = REG_RD(bp, emac_base +
EMAC_REG_RX_PFC_STATS_XOFF_SENT);
val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
pfc_frames_sent[0] = val_xon + val_xoff;
}
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2])
{
/* Read pfc statistic */
struct bnx2x *bp = params->bp;
u32 val = 0;
DP(NETIF_MSG_LINK, "pfc statistic\n");
if (!vars->link_up)
return;
val = REG_RD(bp, MISC_REG_RESET_REG_2);
if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
== 0) {
DP(NETIF_MSG_LINK, "About to read stats from EMAC\n");
bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
pfc_frames_received);
} else {
DP(NETIF_MSG_LINK, "About to read stats from BMAC\n");
bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent,
pfc_frames_received);
}
}
/******************************************************************/
/* MAC/PBF section */
/******************************************************************/
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
{
@ -315,24 +580,55 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
(EMAC_TX_MODE_EXT_PAUSE_EN |
EMAC_TX_MODE_FLOW_EN));
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_TX_MODE,
(EMAC_TX_MODE_EXT_PAUSE_EN |
EMAC_TX_MODE_FLOW_EN));
(EMAC_TX_MODE_EXT_PAUSE_EN |
EMAC_TX_MODE_FLOW_EN));
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED)) {
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
bnx2x_bits_en(bp, emac_base +
EMAC_REG_EMAC_TX_MODE,
(EMAC_TX_MODE_EXT_PAUSE_EN |
EMAC_TX_MODE_FLOW_EN));
} else
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_FLOW_EN);
}
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
/**
* Setting this bit causes MAC control frames (except for pause
* frames) to be passed on for processing. This setting has no
* effect on the operation of the pause frames. This bit affects
* all packets regardless of RX Parser packet sorting logic.
* Turn the PFC off to make sure we are in Xon state before
* enabling it.
*/
EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
DP(NETIF_MSG_LINK, "PFC is enabled\n");
/* Enable PFC again */
EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
EMAC_REG_RX_PFC_MODE_RX_EN |
EMAC_REG_RX_PFC_MODE_TX_EN |
EMAC_REG_RX_PFC_MODE_PRIORITIES);
EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
((0x0101 <<
EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
(0x00ff <<
EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
}
EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
/* Set Loopback */
@ -362,7 +658,9 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
if ((params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
@ -383,9 +681,38 @@ static u8 bnx2x_emac_enable(struct link_params *params,
return 0;
}
static void bnx2x_update_bmac2(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
static void bnx2x_update_pfc_bmac1(struct link_params *params,
struct link_vars *vars)
{
u32 wb_data[2];
struct bnx2x *bp = params->bp;
u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
NIG_REG_INGRESS_BMAC0_MEM;
u32 val = 0x14;
if ((!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED)) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
/* Enable BigMAC to react on received Pause packets */
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
/* tx control */
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
}
static void bnx2x_update_pfc_bmac2(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
/*
* Set rx control: Strip CRC and enable BigMAC to relay
@ -397,7 +724,9 @@ static void bnx2x_update_bmac2(struct link_params *params,
NIG_REG_INGRESS_BMAC0_MEM;
u32 val = 0x14;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
if ((!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED)) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
/* Enable BigMAC to react on received Pause packets */
val |= (1<<5);
wb_data[0] = val;
@ -408,14 +737,47 @@ static void bnx2x_update_bmac2(struct link_params *params,
/* Tx control */
val = 0xc0;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL,
wb_data, 2);
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
DP(NETIF_MSG_LINK, "PFC is enabled\n");
/* Enable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x0;
wb_data[0] |= (1<<0); /* RX */
wb_data[0] |= (1<<1); /* TX */
wb_data[0] |= (1<<2); /* Force initial Xon */
wb_data[0] |= (1<<3); /* 8 cos */
wb_data[0] |= (1<<5); /* STATS */
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
wb_data, 2);
/* Clear the force Xon */
wb_data[0] &= ~(1<<2);
} else {
DP(NETIF_MSG_LINK, "PFC is disabled\n");
/* disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
}
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
/**
* Set Time (base unit is 512 bit time) between automatic
* re-sending of PP packets and enable automatic re-send of
* Per-Priority Packet as long as pp_gen is asserted and
* pp_disable is low.
*/
val = 0x8000;
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
val |= (1<<16); /* enable automatic re-send */
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
@ -427,6 +789,9 @@ static void bnx2x_update_bmac2(struct link_params *params,
val |= 0x4; /* Local loopback */
DP(NETIF_MSG_LINK, "enable bmac loopback\n");
}
/* When PFC enabled, Pass pause frames towards the NIG. */
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
val |= ((1<<6)|(1<<5));
wb_data[0] = val;
wb_data[1] = 0;
@ -434,6 +799,239 @@ static void bnx2x_update_bmac2(struct link_params *params,
wb_data, 2);
}
static void bnx2x_update_pfc_brb(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params)
{
struct bnx2x *bp = params->bp;
int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
/* default - pause configuration */
u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
if (set_pfc && pfc_params)
/* First COS */
if (!pfc_params->cos0_pauseable) {
pause_xoff_th =
PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
pause_xon_th =
PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
full_xoff_th =
PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
/* The number of free blocks below which the pause signal to class 0
of MAC #n is asserted. n=0,1 */
REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
/* The number of free blocks above which the pause signal to class 0
of MAC #n is de-asserted. n=0,1 */
REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
/* The number of free blocks below which the full signal to class 0
of MAC #n is asserted. n=0,1 */
REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
/* The number of free blocks above which the full signal to class 0
of MAC #n is de-asserted. n=0,1 */
REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
if (set_pfc && pfc_params) {
/* Second COS */
if (pfc_params->cos1_pauseable) {
pause_xoff_th =
PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
pause_xon_th =
PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
full_xoff_th =
PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
} else {
pause_xoff_th =
PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
pause_xon_th =
PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
full_xoff_th =
PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
/**
* The number of free blocks below which the pause signal to
* class 1 of MAC #n is asserted. n=0,1
**/
REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
/**
* The number of free blocks above which the pause signal to
* class 1 of MAC #n is de-asserted. n=0,1
**/
REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
/**
* The number of free blocks below which the full signal to
* class 1 of MAC #n is asserted. n=0,1
**/
REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
/**
* The number of free blocks above which the full signal to
* class 1 of MAC #n is de-asserted. n=0,1
**/
REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
}
}
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
{
u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
u32 pkt_priority_to_cos = 0;
u32 val;
struct bnx2x *bp = params->bp;
int port = params->port;
int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
/**
* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
* MAC control frames (that are not pause packets)
* will be forwarded to the XCM.
*/
xcm_mask = REG_RD(bp,
port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK);
/**
* nig params will override non-PFC params, since it's possible to
* transition from PFC to SAFC
*/
if (set_pfc) {
pause_enable = 0;
llfc_out_en = 0;
llfc_enable = 0;
ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
xcm0_out_en = 0;
p0_hwpfc_enable = 1;
} else {
if (nig_params) {
llfc_out_en = nig_params->llfc_out_en;
llfc_enable = nig_params->llfc_enable;
pause_enable = nig_params->pause_enable;
} else /* default non-PFC mode - PAUSE */
pause_enable = 1;
xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
xcm0_out_en = 1;
}
REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
NIG_REG_LLFC_ENABLE_0, llfc_enable);
REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
NIG_REG_PAUSE_ENABLE_0, pause_enable);
REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
NIG_REG_PPP_ENABLE_0, ppp_enable);
REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK, xcm_mask);
REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
/* output enable for RX_XCM # IF */
REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
/* HW PFC TX enable */
REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
/* 0x2 = BMAC, 0x1= EMAC */
switch (vars->mac_type) {
case MAC_TYPE_EMAC:
val = 1;
break;
case MAC_TYPE_BMAC:
val = 0;
break;
default:
val = 0;
break;
}
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
if (nig_params) {
pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
NIG_REG_P0_RX_COS0_PRIORITY_MASK,
nig_params->rx_cos0_priority_mask);
REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
NIG_REG_P0_RX_COS1_PRIORITY_MASK,
nig_params->rx_cos1_priority_mask);
REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
nig_params->llfc_high_priority_classes);
REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
nig_params->llfc_low_priority_classes);
}
REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
NIG_REG_P0_PKT_PRIORITY_TO_COS,
pkt_priority_to_cos);
}
void bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params)
{
/**
* PFC and pause are orthogonal to one another: when PFC is enabled,
* pause is disabled, and when PFC is disabled, pause is set
* according to the pause result.
*/
u32 val;
struct bnx2x *bp = params->bp;
/* update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
/* update BRB params */
bnx2x_update_pfc_brb(params, vars, pfc_params);
if (!vars->link_up)
return;
val = REG_RD(bp, MISC_REG_RESET_REG_2);
if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
== 0) {
DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
bnx2x_emac_enable(params, vars, 0);
return;
}
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, 0);
else
bnx2x_update_pfc_bmac1(params, vars);
val = 0;
if ((params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
}
static u8 bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
@ -465,15 +1063,6 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
wb_data, 2);
/* tx control */
val = 0xc0;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
val |= 0x800000;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
wb_data, 2);
/* mac control */
val = 0x3;
if (is_lb) {
@ -491,14 +1080,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
wb_data, 2);
/* rx control set to don't strip crc */
val = 0x14;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
val |= 0x20;
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
@ -595,7 +1177,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
wb_data, 2);
udelay(30);
bnx2x_update_bmac2(params, vars, is_lb);
bnx2x_update_pfc_bmac2(params, vars, is_lb);
return 0;
}
@ -627,7 +1209,9 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
val = 0;
if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
if ((params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) ||
(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
@ -3904,7 +4488,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;;
return 0;
msleep(1);
}
return -EINVAL;
@ -3988,7 +4572,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;;
return 0;
msleep(1);
}

View file

@ -65,6 +65,22 @@
#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170
#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0
#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250
#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0
#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10
#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90
#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50
#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250
#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
/***********************************************************/
/* Structs */
/***********************************************************/
@ -216,6 +232,7 @@ struct link_params {
u32 feature_config_flags;
#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
/* Will be populated during common init */
@ -332,4 +349,43 @@ u8 bnx2x_phy_probe(struct link_params *params);
u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base, u8 port);
/* PFC port configuration params */
struct bnx2x_nig_brb_pfc_port_params {
/* NIG */
u32 pause_enable;
u32 llfc_out_en;
u32 llfc_enable;
u32 pkt_priority_to_cos;
u32 rx_cos0_priority_mask;
u32 rx_cos1_priority_mask;
u32 llfc_high_priority_classes;
u32 llfc_low_priority_classes;
/* BRB */
u32 cos0_pauseable;
u32 cos1_pauseable;
};
/**
* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
void bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params);
/* Used to configure the ETS to disable */
void bnx2x_ets_disabled(struct link_params *params);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
const u32 cos1_bw);
/* Used to configure the ETS to strict */
u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
/* Read PFC statistics */
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
u32 pfc_frames_received[2]);
#endif /* BNX2X_LINK_H */
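/*
 * Illustrative sketch of the new ETS/PFC entry points declared above;
 * not from the patch. Whether the bandwidth arguments are percentages
 * and whether bnx2x_ets_strict() returns 0 on success are assumptions.
 */
static void example_configure_ets(struct link_params *params,
				  struct link_vars *vars)
{
	u32 sent[2], rcvd[2];

	/* Weighted split between the two classes of service... */
	bnx2x_ets_bw_limit(params, 70, 30);

	/* ...or give COS1 strict priority instead (assumed: 0 on success). */
	if (bnx2x_ets_strict(params, 1))
		return;

	/* Read the accumulated PFC frame counters. */
	bnx2x_pfc_statistic(params, vars, sent, rcvd);
}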

File diff suppressed because it is too large

View file

@@ -1615,6 +1615,8 @@
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
@@ -1744,12 +1746,16 @@
~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
port */
#define NIG_REG_LLFC_ENABLE_0 0x16208
#define NIG_REG_LLFC_ENABLE_1 0x1620c
/* [RW 16] classes are high-priority for port0 */
#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
/* [RW 16] classes are low-priority for port0 */
#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
#define NIG_REG_LLFC_OUT_EN_0 0x160c8
#define NIG_REG_LLFC_OUT_EN_1 0x160cc
#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
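/*
 * Sketch only: the _0/_1 LLFC register pairs added above are 4 bytes
 * apart, matching the "base + port*4" addressing used elsewhere in this
 * patch, so a per-port LLFC enable might look like this. The helper
 * name and class masks are assumptions.
 */
static void example_nig_llfc_enable(struct bnx2x *bp, u8 port,
				    u16 high_classes, u16 low_classes)
{
	REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 1);
	REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 1);
	REG_WR(bp, NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 + port*4,
	       high_classes);
	REG_WR(bp, NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 + port*4,
	       low_classes);
}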
@@ -1774,6 +1780,8 @@
/* [RW 8] event id for llh0 */
#define NIG_REG_LLH0_EVENT_ID 0x10084
#define NIG_REG_LLH0_FUNC_EN 0x160fc
#define NIG_REG_LLH0_FUNC_MEM 0x16180
#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
/* [RW 1] Determine the IP version to look for in
~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1797,6 +1805,9 @@
#define NIG_REG_LLH1_ERROR_MASK 0x10090
/* [RW 8] event id for llh1 */
#define NIG_REG_LLH1_EVENT_ID 0x10088
#define NIG_REG_LLH1_FUNC_MEM 0x161c0
#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
/* [RW 8] init credit counter for port1 in LLH */
#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
#define NIG_REG_LLH1_XCM_MASK 0x10134
@@ -1907,11 +1918,17 @@
~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
port */
#define NIG_REG_PAUSE_ENABLE_0 0x160c0
#define NIG_REG_PAUSE_ENABLE_1 0x160c4
/* [RW 1] Input enable for RX PBF LP IF */
#define NIG_REG_PBF_LB_IN_EN 0x100b4
/* [RW 1] Value of this register will be transmitted to port swap when
~nig_registers_strap_override.strap_override =1 */
#define NIG_REG_PORT_SWAP 0x10394
/* [RW 1] PPP enable for port0. This register may get 1 only when
* ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the
* same port */
#define NIG_REG_PPP_ENABLE_0 0x160b0
#define NIG_REG_PPP_ENABLE_1 0x160b4
/* [RW 1] output enable for RX parser descriptor IF */
#define NIG_REG_PRS_EOP_OUT_EN 0x10104
/* [RW 1] Input enable for RX parser request IF */
@@ -1978,6 +1995,14 @@
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
/* [RW 31] The weight of COS0 in the ETS command arbiter. */
#define PBF_REG_COS0_WEIGHT 0x15c054
/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
#define PBF_REG_COS1_UPPER_BOUND 0x15c060
/* [RW 31] The weight of COS1 in the ETS command arbiter. */
#define PBF_REG_COS1_WEIGHT 0x15c058
/* [RW 1] Disable processing further tasks from port 0 (after ending the
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
@@ -1988,9 +2013,16 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
/* [RW 1] Indicates that ETS is performed between the COSes in the command
* arbiter. If reset, strict priority w/ anti-starvation will be performed
* w/o WFQ. */
#define PBF_REG_ETS_ENABLED 0x15c050
/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
* Ethernet header. */
#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
/* [RW 1] Indicates which COS is connected to the highest priority in the
* command arbiter. */
#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
#define PBF_REG_IF_ENABLE_REG 0x140044
/* [RW 1] Init bit. When set the initial credits are copied to the credit
registers (except the port credits). Should be set and then reset after
@@ -2016,6 +2048,10 @@
#define PBF_REG_MAC_LB_ENABLE 0x140040
/* [RW 6] Bit-map indicating which headers must appear in the packet */
#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
/* [RW 16] The number of strict priority arbitration slots between 2 RR
* arbitration slots. A value of 0 means no strict priority cycles; i.e. the
* strict-priority w/ anti-starvation arbiter is a RR arbiter. */
#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
not supported. */
#define PBF_REG_P0_ARB_THRSH 0x1400e4
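/*
 * Sketch, not the driver's actual sequence: enabling WFQ between the
 * two COSes with the ETS registers added above. The weight, bound and
 * slot values are illustrative assumptions.
 */
static void example_pbf_ets_setup(struct bnx2x *bp)
{
	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
	REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x100);
	REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x100);
	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x1000);
	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x1000);
	/* 0 strict slots => pure round-robin between the COSes */
	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}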
@@ -4970,7 +5006,23 @@
#define EMAC_REG_EMAC_TX_MODE 0xbc
#define EMAC_REG_EMAC_TX_STAT_AC 0x280
#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
#define EMAC_REG_RX_PFC_MODE 0x320
#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
#define EMAC_REG_RX_PFC_PARAM 0x324
#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
#define EMAC_RX_MODE_FLOW_EN (1L<<2)
#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
#define EMAC_RX_MODE_RESET (1L<<0)
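/*
 * Sketch under stated assumptions: the mode bits above are OR'd into
 * EMAC_REG_RX_PFC_MODE, and EMAC_REG_RX_PFC_PARAM carries the PFC
 * opcode (0x0101 per IEEE 802.1Qbb) in its low 16 bits with the
 * priority-enable vector above it. 'emac_base' is a hypothetical
 * parameter, not from the patch.
 */
static void example_emac_rx_pfc_enable(struct bnx2x *bp, u32 emac_base)
{
	u32 val = EMAC_REG_RX_PFC_MODE_RX_EN |
		  EMAC_REG_RX_PFC_MODE_TX_EN |
		  EMAC_REG_RX_PFC_MODE_PRIORITIES;

	REG_WR(bp, emac_base + EMAC_REG_RX_PFC_MODE, val);
	REG_WR(bp, emac_base + EMAC_REG_RX_PFC_PARAM,
	       (0x0101 << EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
	       (0xff << EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT));
}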

Some files were not shown because too many files have changed in this diff