/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */

#ifndef _LINUX_NETPOLL_H
#define _LINUX_NETPOLL_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
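
/*
 * Address of a netpoll endpoint: either an IPv4 or an IPv6 address,
 * selected by the 'ipv6' flag in struct netpoll below.  'all' gives raw
 * access to the four 32-bit words that back both representations.
 */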
union inet_addr {
	__u32		all[4];
	__be32		ip;
	__be32		ip6[4];
	struct in_addr	in;
	struct in6_addr	in6;
};
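
/*
 * One instance per netpoll client (for example a netconsole target):
 * the bound device, the local and remote UDP endpoints, and an optional
 * rx_hook that is handed the payload of matching inbound UDP packets.
 */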
struct netpoll {
	struct net_device *dev;
	char dev_name[IFNAMSIZ];
	const char *name;
	void (*rx_hook)(struct netpoll *, int, char *, int);

	union inet_addr local_ip, remote_ip;
	bool ipv6;
	u16 local_port, remote_port;
	u8 remote_mac[ETH_ALEN];

	struct list_head rx; /* rx_np list element */
	struct work_struct cleanup_work;
};
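
/*
 * Per-net_device netpoll state, shared (and refcounted) by every netpoll
 * attached to the device and freed via RCU.  dev_lock appears to exist so
 * that netpoll's poll path can be held off while the device is going up
 * or down (see netpoll_rx_disable()/netpoll_rx_enable()); txq queues
 * packets that could not be sent immediately and is drained by tx_work.
 */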
struct netpoll_info {
	atomic_t refcnt;

	unsigned long rx_flags;
	spinlock_t rx_lock;
	struct semaphore dev_lock;
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
	struct sk_buff_head txq;

	struct delayed_work tx_work;

	struct netpoll *netpoll;
	struct rcu_head rcu;
};
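
/*
 * Quiesce/resume netpoll RX processing for a device.  These bracket
 * dev_open()/dev_close() so that the driver's ndo_poll_controller and
 * ->poll routines are not invoked from the netpoll path while the driver
 * is still initialising or already tearing down its data structures.
 */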
#ifdef CONFIG_NETPOLL
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
static inline void netpoll_rx_disable(struct net_device *dev) { return; }
static inline void netpoll_rx_enable(struct net_device *dev) { return; }
#endif
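
/*
 * Transmit 'len' bytes of 'msg' as a UDP packet to the netpoll's remote
 * endpoint; this is the primitive netconsole uses to emit console output.
 */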
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
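
/*
 * Print the configured parameters of a netpoll, and parse a
 * netconsole-style option string (roughly
 * "local_port@local_ip/dev,remote_port@remote_ip/remote_mac") into one.
 */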
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
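
/*
 * Attach a netpoll to a device: netpoll_setup() resolves np->dev_name and
 * performs the full setup, while __netpoll_setup() binds to an already
 * resolved ndev, with gfp selecting the allocation context.
 */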
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
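
/*
 * Tear-down: netpoll_cleanup()/__netpoll_cleanup() detach a netpoll from
 * its device, while __netpoll_free_async() defers the cleanup and free to
 * np->cleanup_work for callers that cannot do it synchronously.
 */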
void __netpoll_cleanup(struct netpoll *np);
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
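
/*
 * Transmit an skb through the netpoll path.  netpoll_send_skb() disables
 * hard IRQs around netpoll_send_skb_on_dev() on the bound device, which
 * is also what lets netpoll_tx_running() below detect the netpoll TX path.
 */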
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);

static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;

	local_irq_save(flags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(flags);
}

#ifdef CONFIG_NETPOLL
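/*
 * True if the skb's device has active netpoll RX state: a netpoll on the
 * rx_np list (i.e. a registered rx_hook) or a non-zero rx_flags.
 */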
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}
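
/*
 * RX hook for the core receive path: with hard IRQs disabled, re-check
 * the device's netpoll state under rx_lock and hand the packet to
 * __netpoll_rx().  Returns true if netpoll consumed the skb.
 */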
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
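
/*
 * Wrapper for the softirq receive path: only consult netpoll_rx() when
 * the device actually has NAPI instances registered.  Returns non-zero
 * if netpoll consumed the skb.
 */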
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	if (!list_empty(&skb->dev->napi_list))
		return netpoll_rx(skb);
	return 0;
}
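
/*
 * Serialize ->poll() between the normal NAPI softirq and netpoll's own
 * polling: netpoll_poll_lock() takes napi->poll_lock and records the
 * owning CPU, netpoll_poll_unlock() releases it.  Typical caller-side
 * pattern (sketch):
 *
 *	void *have = netpoll_poll_lock(napi);
 *	work = napi->poll(napi, budget);
 *	netpoll_poll_unlock(have);
 */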
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
}
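
/*
 * Heuristic for drivers that need to know whether they are being called
 * from the netpoll TX path: netpoll_send_skb() transmits with hard IRQs
 * disabled, so irqs_disabled() is used as the indicator.
 */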
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}

#else
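/* Stubs for builds without CONFIG_NETPOLL. */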
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif

#endif