/*
 * Common code for low-level network console, dump, and debugger code
 *
 * Derived from netconsole, kgdb-over-ethernet, and netdump patches
 */
|
|
|
|
|
|
|
|
#ifndef _LINUX_NETPOLL_H
|
|
|
|
#define _LINUX_NETPOLL_H
|
|
|
|
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/interrupt.h>
|
2005-08-11 20:27:43 -06:00
|
|
|
#include <linux/rcupdate.h>
|
2005-04-16 16:20:36 -06:00
|
|
|
#include <linux/list.h>
|
|
|
|
|
|
|
|
/*
 * struct netpoll - per-client netpoll configuration (console, dump, or
 * debugger).  The client fills this in and activates it with
 * netpoll_setup(); netpoll_cleanup() tears it down.
 */
struct netpoll {
	struct net_device *dev;		/* device in use; resolved from dev_name */
	char dev_name[IFNAMSIZ];	/* interface name, e.g. "eth0" */
	const char *name;		/* client name, used for diagnostics */
	/*
	 * Optional receive callback.  NOTE(review): arguments appear to be
	 * (np, source port, payload, payload length) — confirm against
	 * the caller in net/core/netpoll.c.
	 */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	u32 local_ip, remote_ip;	/* IPv4 endpoint addresses */
	u16 local_port, remote_port;	/* UDP endpoint ports */
	u8 local_mac[ETH_ALEN], remote_mac[ETH_ALEN];	/* Ethernet addresses */
};
|
|
|
|
|
|
|
|
/*
 * struct netpoll_info - per-device netpoll state, hung off
 * net_device->npinfo and shared by all netpoll clients on that device.
 */
struct netpoll_info {
	atomic_t refcnt;	/* number of netpoll clients attached */
	spinlock_t poll_lock;	/* serializes polling of the device */
	int poll_owner;		/* CPU holding poll_lock, -1 when unowned */
	int rx_flags;		/* nonzero when rx interception is active */
	spinlock_t rx_lock;	/* protects rx state against concurrent rx */
	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;	/* packets queued for deferred transmit */
	struct delayed_work tx_work;	/* worker that drains txq */
};
|
|
|
|
|
|
|
|
/* Core netpoll API */
void netpoll_poll(struct netpoll *np);	/* poll the device for rx/tx work */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);	/* emit a UDP message */
int netpoll_parse_options(struct netpoll *np, char *opt);	/* parse "port@ip/dev,..." config string */
int netpoll_setup(struct netpoll *np);	/* attach np to its device; 0 on success */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void netpoll_cleanup(struct netpoll *np);	/* detach np and release shared state */
int __netpoll_rx(struct sk_buff *skb);	/* rx fast-path hook; nonzero if skb consumed */
|
2006-10-26 16:46:55 -06:00
|
|
|
|
2005-04-16 16:20:36 -06:00
|
|
|
|
|
|
|
#ifdef CONFIG_NETPOLL
|
|
|
|
static inline int netpoll_rx(struct sk_buff *skb)
|
|
|
|
{
|
2005-06-22 23:05:31 -06:00
|
|
|
struct netpoll_info *npinfo = skb->dev->npinfo;
|
2005-06-22 23:05:59 -06:00
|
|
|
unsigned long flags;
|
|
|
|
int ret = 0;
|
2005-06-22 23:05:31 -06:00
|
|
|
|
2005-06-22 23:05:59 -06:00
|
|
|
if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
|
2005-06-22 23:05:31 -06:00
|
|
|
return 0;
|
|
|
|
|
2005-06-22 23:05:59 -06:00
|
|
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
|
|
|
/* check rx_flags again with the lock held */
|
|
|
|
if (npinfo->rx_flags && __netpoll_rx(skb))
|
|
|
|
ret = 1;
|
|
|
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
|
|
|
|
|
|
|
return ret;
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
|
2005-08-11 20:27:43 -06:00
|
|
|
static inline void *netpoll_poll_lock(struct net_device *dev)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2005-08-11 20:27:43 -06:00
|
|
|
rcu_read_lock(); /* deal with race on ->npinfo */
|
2005-06-22 23:05:31 -06:00
|
|
|
if (dev->npinfo) {
|
|
|
|
spin_lock(&dev->npinfo->poll_lock);
|
|
|
|
dev->npinfo->poll_owner = smp_processor_id();
|
2005-08-11 20:27:43 -06:00
|
|
|
return dev->npinfo;
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
2005-08-11 20:27:43 -06:00
|
|
|
return NULL;
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
|
2005-08-11 20:27:43 -06:00
|
|
|
static inline void netpoll_poll_unlock(void *have)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2005-08-11 20:27:43 -06:00
|
|
|
struct netpoll_info *npi = have;
|
|
|
|
|
|
|
|
if (npi) {
|
|
|
|
npi->poll_owner = -1;
|
|
|
|
spin_unlock(&npi->poll_lock);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
2005-08-11 20:27:43 -06:00
|
|
|
rcu_read_unlock();
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
#else
/* CONFIG_NETPOLL disabled: stubs that compile away to nothing */
#define netpoll_rx(a) 0
#define netpoll_poll_lock(a) NULL
#define netpoll_poll_unlock(a)
#endif
|
|
|
|
|
|
|
|
#endif
|