irda: convert to internal stats
Convert the IrDA drivers to use the net_device_stats structure already present in struct net_device. This is a precursor to the conversion to net_device_ops. Compile-tested only.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit af0490810c
parent 46377bb311
26 changed files with 230 additions and 391 deletions
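Every driver in the patch changes the same way: the private struct net_device_stats field in the driver's control block is dropped, counters are bumped directly on the stats member embedded in struct net_device, and the get_stats helper plus its dev->get_stats assignment are deleted, since the networking core falls back to dev->stats when a driver supplies no get_stats method. A minimal before/after sketch of the pattern (foo_cb and foo_hard_xmit are made-up names for illustration, not code from this patch):

struct foo_cb {
	struct net_device *netdev;	/* the counters now live here, in netdev->stats */
	/* before this patch: struct net_device_stats stats; -- now removed */
	struct irlap_cb *irlap;
};

static int foo_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* before: self->stats.tx_packets++; self->stats.tx_bytes += skb->len; */
	dev->stats.tx_packets++;		/* after: count on the net_device itself */
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}

/* The driver's foo_net_get_stats() helper and the "dev->get_stats = foo_net_get_stats;"
 * assignment are deleted outright; nothing replaces them. */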
@@ -109,7 +109,6 @@ static int ali_ircc_net_open(struct net_device *dev);
 static int ali_ircc_net_close(struct net_device *dev);
 static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud);
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev);
 
 /* SIR function */
 static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -366,7 +365,6 @@ static int ali_ircc_open(int i, chipio_t *info)
 dev->open = ali_ircc_net_open;
 dev->stop = ali_ircc_net_close;
 dev->do_ioctl = ali_ircc_net_ioctl;
-dev->get_stats = ali_ircc_net_get_stats;
 
 err = register_netdev(dev);
 if (err) {
@@ -876,7 +874,7 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
 * async_unwrap_char will deliver all found frames
 */
 do {
-async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
+async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
 inb(iobase+UART_RX));
 
 /* Make sure we don't stay here too long */
@@ -943,7 +941,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
 netif_wake_queue(self->netdev);
 }
 
-self->stats.tx_packets++;
+self->netdev->stats.tx_packets++;
 
 /* Turn on receive interrupts */
 outb(UART_IER_RDI, iobase+UART_IER);
@@ -1467,7 +1465,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
 self->tx_fifo.tail += skb->len;
 
-self->stats.tx_bytes += skb->len;
+dev->stats.tx_bytes += skb->len;
 
 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
 skb->len);
@@ -1661,12 +1659,12 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
 
 {
 IRDA_ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __func__);
-self->stats.tx_errors++;
-self->stats.tx_fifo_errors++;
+self->netdev->stats.tx_errors++;
+self->netdev->stats.tx_fifo_errors++;
 }
 else
 {
-self->stats.tx_packets++;
+self->netdev->stats.tx_packets++;
 }
 
 /* Check if we need to change the speed */
@@ -1831,35 +1829,35 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ );
 
 /* Skip frame */
-self->stats.rx_errors++;
+self->netdev->stats.rx_errors++;
 
 self->rx_buff.data += len;
 
 if (status & LSR_FIFO_UR)
 {
-self->stats.rx_frame_errors++;
+self->netdev->stats.rx_frame_errors++;
 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ );
 }
 if (status & LSR_FRAME_ERROR)
 {
-self->stats.rx_frame_errors++;
+self->netdev->stats.rx_frame_errors++;
 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ );
 }
 
 if (status & LSR_CRC_ERROR)
 {
-self->stats.rx_crc_errors++;
+self->netdev->stats.rx_crc_errors++;
 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ );
 }
 
 if(self->rcvFramesOverflow)
 {
-self->stats.rx_frame_errors++;
+self->netdev->stats.rx_frame_errors++;
 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ );
 }
 if(len == 0)
 {
-self->stats.rx_frame_errors++;
+self->netdev->stats.rx_frame_errors++;
 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ );
 }
 }
@@ -1910,7 +1908,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
 IRDA_WARNING("%s(), memory squeeze, "
 "dropping frame.\n",
 __func__);
-self->stats.rx_dropped++;
+self->netdev->stats.rx_dropped++;
 
 return FALSE;
 }
@@ -1924,8 +1922,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
 
 /* Move to next frame */
 self->rx_buff.data += len;
-self->stats.rx_bytes += len;
-self->stats.rx_packets++;
+self->netdev->stats.rx_bytes += len;
+self->netdev->stats.rx_packets++;
 
 skb->dev = self->netdev;
 skb_reset_mac_header(skb);
@@ -1994,7 +1992,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
 self->tx_buff.truesize);
 
-self->stats.tx_bytes += self->tx_buff.len;
+self->netdev->stats.tx_bytes += self->tx_buff.len;
 
 /* Turn on transmit finished interrupt. Will fire immediately! */
 outb(UART_IER_THRI, iobase+UART_IER);
@@ -2111,17 +2109,6 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
 return status;
 }
 
-static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
-{
-struct ali_ircc_cb *self = netdev_priv(dev);
-
-IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
-
-IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __func__ );
-
-return &self->stats;
-}
-
 static int ali_ircc_suspend(struct platform_device *dev, pm_message_t state)
 {
 struct ali_ircc_cb *self = platform_get_drvdata(dev);
@@ -191,7 +191,6 @@ struct ali_ircc_cb {
 struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
 
 struct net_device *netdev; /* Yes! we are some kind of netdevice */
-struct net_device_stats stats;
 
 struct irlap_cb *irlap; /* The link layer we are binded to */
 struct qos_info qos; /* QoS capabilities for this device */
@@ -107,7 +107,6 @@ struct au1k_private {
 iobuff_t rx_buff;
 
 struct net_device *netdev;
-struct net_device_stats stats;
 
 struct timeval stamp;
 struct timeval now;
@@ -53,7 +53,6 @@ static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
 static int au1k_irda_rx(struct net_device *);
 static void au1k_irda_interrupt(int, void *);
 static void au1k_tx_timeout(struct net_device *);
-static struct net_device_stats *au1k_irda_stats(struct net_device *);
 static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
 static int au1k_irda_set_speed(struct net_device *dev, int speed);
 
@@ -213,7 +212,6 @@ static int au1k_irda_net_init(struct net_device *dev)
 dev->open = au1k_irda_start;
 dev->hard_start_xmit = au1k_irda_hard_xmit;
 dev->stop = au1k_irda_stop;
-dev->get_stats = au1k_irda_stats;
 dev->do_ioctl = au1k_irda_ioctl;
 dev->tx_timeout = au1k_tx_timeout;
 
@@ -832,13 +830,6 @@ au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
 return ret;
 }
 
-
-static struct net_device_stats *au1k_irda_stats(struct net_device *dev)
-{
-struct au1k_private *aup = netdev_priv(dev);
-return &aup->stats;
-}
-
 MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
 MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
 
@@ -308,7 +308,6 @@ struct OboeRing
 struct toshoboe_cb
 {
 struct net_device *netdev; /* Yes! we are some kind of netdevice */
-struct net_device_stats stats;
 struct tty_driver ttydev;
 
 struct irlap_cb *irlap; /* The link layer we are binded to */
@@ -122,7 +122,6 @@ static int irda_usb_net_open(struct net_device *dev);
 static int irda_usb_net_close(struct net_device *dev);
 static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void irda_usb_net_timeout(struct net_device *dev);
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev);
 
 /************************ TRANSMIT ROUTINES ************************/
 /*
@@ -525,13 +524,13 @@ static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
 /* Ask USB to send the packet - Irq disabled -> GFP_ATOMIC */
 if ((res = usb_submit_urb(urb, GFP_ATOMIC))) {
 IRDA_WARNING("%s(), failed Tx URB\n", __func__);
-self->stats.tx_errors++;
+netdev->stats.tx_errors++;
 /* Let USB recover : We will catch that in the watchdog */
 /*netif_start_queue(netdev);*/
 } else {
 /* Increment packet stats */
-self->stats.tx_packets++;
-self->stats.tx_bytes += skb->len;
+netdev->stats.tx_packets++;
+netdev->stats.tx_bytes += skb->len;
 
 netdev->trans_start = jiffies;
 }
@@ -677,7 +676,7 @@ static void irda_usb_net_timeout(struct net_device *netdev)
 IRDA_DEBUG(0, "%s: Tx timed out, urb->status=%d, urb->transfer_flags=0x%04X\n", netdev->name, urb->status, urb->transfer_flags);
 
 /* Increase error count */
-self->stats.tx_errors++;
+netdev->stats.tx_errors++;
 
 #ifdef IU_BUG_KICK_TIMEOUT
 /* Can't be a bad idea to reset the speed ;-) - Jean II */
@@ -826,7 +825,7 @@ static void irda_usb_receive(struct urb *urb)
 if (urb->status != 0) {
 switch (urb->status) {
 case -EILSEQ:
-self->stats.rx_crc_errors++;
+self->netdev->stats.rx_crc_errors++;
 /* Also precursor to a hot-unplug on UHCI. */
 /* Fallthrough... */
 case -ECONNRESET:
@@ -839,7 +838,7 @@ static void irda_usb_receive(struct urb *urb)
 case -ETIME:
 /* Usually precursor to a hot-unplug on OHCI. */
 default:
-self->stats.rx_errors++;
+self->netdev->stats.rx_errors++;
 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags);
 break;
 }
@@ -890,7 +889,7 @@ static void irda_usb_receive(struct urb *urb)
 IRDA_SKB_MAX_MTU);
 
 if (!newskb) {
-self->stats.rx_dropped++;
+self->netdev->stats.rx_dropped++;
 /* We could deliver the current skb, but this would stall
 * the Rx path. Better drop the packet... Jean II */
 goto done;
@@ -927,8 +926,8 @@ static void irda_usb_receive(struct urb *urb)
 netif_rx(dataskb);
 
 /* Keep stats up to date */
-self->stats.rx_bytes += len;
-self->stats.rx_packets++;
+self->netdev->stats.rx_bytes += len;
+self->netdev->stats.rx_packets++;
 
 done:
 /* Note : at this point, the URB we've just received (urb)
@@ -1342,14 +1341,6 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 /*------------------------------------------------------------------*/
-/*
-* Get device stats (for /proc/net/dev and ifconfig)
-*/
-static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev)
-{
-struct irda_usb_cb *self = netdev_priv(dev);
-return &self->stats;
-}
 
 /********************* IRDA CONFIG SUBROUTINES *********************/
 /*
@@ -1428,7 +1419,6 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
 netdev->watchdog_timeo = 250*HZ/1000; /* 250 ms > USB timeout */
 netdev->open = irda_usb_net_open;
 netdev->stop = irda_usb_net_close;
-netdev->get_stats = irda_usb_net_get_stats;
 netdev->do_ioctl = irda_usb_net_ioctl;
 
 return register_netdev(netdev);
@@ -152,7 +152,6 @@ struct irda_usb_cb {
 struct urb *speed_urb; /* URB used to send speed commands */
 
 struct net_device *netdev; /* Yes! we are some kind of netdev. */
-struct net_device_stats stats;
 struct irlap_cb *irlap; /* The link layer we are binded to */
 struct qos_info qos;
 char *speed_buff; /* Buffer for speed changes */
@@ -105,7 +105,7 @@ struct kingsun_cb {
 struct usb_device *usbdev; /* init: probe_irda */
 struct net_device *netdev; /* network layer */
 struct irlap_cb *irlap; /* The link layer we are binded to */
-struct net_device_stats stats; /* network statistics */
+
 struct qos_info qos;
 
 __u8 *in_buf; /* receive buffer */
@@ -186,12 +186,12 @@ static int kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
 case -EPIPE:
 break;
 default:
-kingsun->stats.tx_errors++;
+netdev->stats.tx_errors++;
 netif_start_queue(netdev);
 }
 } else {
-kingsun->stats.tx_packets++;
-kingsun->stats.tx_bytes += skb->len;
+netdev->stats.tx_packets++;
+netdev->stats.tx_bytes += skb->len;
 }
 
 dev_kfree_skb(skb);
@@ -232,7 +232,7 @@ static void kingsun_rcv_irq(struct urb *urb)
 if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) {
 for (i = 1; i <= bytes[0]; i++) {
 async_unwrap_char(kingsun->netdev,
-&kingsun->stats,
+&kingsun->netdev->stats,
 &kingsun->rx_buff, bytes[i]);
 }
 do_gettimeofday(&kingsun->rx_time);
@@ -418,15 +418,6 @@ static int kingsun_net_ioctl(struct net_device *netdev, struct ifreq *rq,
 return ret;
 }
 
-/*
-* Get device stats (for /proc/net/dev and ifconfig)
-*/
-static struct net_device_stats *
-kingsun_net_get_stats(struct net_device *netdev)
-{
-struct kingsun_cb *kingsun = netdev_priv(netdev);
-return &kingsun->stats;
-}
 
 /*
 * This routine is called by the USB subsystem for each new device
@@ -532,7 +523,6 @@ static int kingsun_probe(struct usb_interface *intf,
 net->hard_start_xmit = kingsun_hard_xmit;
 net->open = kingsun_net_open;
 net->stop = kingsun_net_close;
-net->get_stats = kingsun_net_get_stats;
 net->do_ioctl = kingsun_net_ioctl;
 
 ret = register_netdev(net);
@ -174,7 +174,7 @@ struct ks959_cb {
|
|||
struct usb_device *usbdev; /* init: probe_irda */
|
||||
struct net_device *netdev; /* network layer */
|
||||
struct irlap_cb *irlap; /* The link layer we are binded to */
|
||||
struct net_device_stats stats; /* network statistics */
|
||||
|
||||
struct qos_info qos;
|
||||
|
||||
struct usb_ctrlrequest *tx_setuprequest;
|
||||
|
@ -366,7 +366,7 @@ static void ks959_send_irq(struct urb *urb)
|
|||
case -EPIPE:
|
||||
break;
|
||||
default:
|
||||
kingsun->stats.tx_errors++;
|
||||
netdev->stats.tx_errors++;
|
||||
netif_start_queue(netdev);
|
||||
}
|
||||
}
|
||||
|
@ -416,12 +416,12 @@ static int ks959_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
case -EPIPE:
|
||||
break;
|
||||
default:
|
||||
kingsun->stats.tx_errors++;
|
||||
netdev->stats.tx_errors++;
|
||||
netif_start_queue(netdev);
|
||||
}
|
||||
} else {
|
||||
kingsun->stats.tx_packets++;
|
||||
kingsun->stats.tx_bytes += skb->len;
|
||||
netdev->stats.tx_packets++;
|
||||
netdev->stats.tx_bytes += skb->len;
|
||||
|
||||
}
|
||||
|
||||
|
@ -469,7 +469,7 @@ static void ks959_rcv_irq(struct urb *urb)
|
|||
*/
|
||||
if (kingsun->rx_variable_xormask != 0) {
|
||||
async_unwrap_char(kingsun->netdev,
|
||||
&kingsun->stats,
|
||||
&kingsun->netdev->stats,
|
||||
&kingsun->rx_unwrap_buff,
|
||||
bytes[i]);
|
||||
}
|
||||
|
@ -668,15 +668,6 @@ static int ks959_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get device stats (for /proc/net/dev and ifconfig)
|
||||
*/
|
||||
static struct net_device_stats *ks959_net_get_stats(struct net_device *netdev)
|
||||
{
|
||||
struct ks959_cb *kingsun = netdev_priv(netdev);
|
||||
return &kingsun->stats;
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine is called by the USB subsystem for each new device
|
||||
* in the system. We need to check if the device is ours, and in
|
||||
|
@ -792,7 +783,6 @@ static int ks959_probe(struct usb_interface *intf,
|
|||
net->hard_start_xmit = ks959_hard_xmit;
|
||||
net->open = ks959_net_open;
|
||||
net->stop = ks959_net_close;
|
||||
net->get_stats = ks959_net_get_stats;
|
||||
net->do_ioctl = ks959_net_ioctl;
|
||||
|
||||
ret = register_netdev(net);
|
||||
|
|
|
@ -140,7 +140,7 @@ struct ksdazzle_cb {
|
|||
struct usb_device *usbdev; /* init: probe_irda */
|
||||
struct net_device *netdev; /* network layer */
|
||||
struct irlap_cb *irlap; /* The link layer we are binded to */
|
||||
struct net_device_stats stats; /* network statistics */
|
||||
|
||||
struct qos_info qos;
|
||||
|
||||
struct urb *tx_urb;
|
||||
|
@ -278,7 +278,7 @@ static void ksdazzle_send_irq(struct urb *urb)
|
|||
case -EPIPE:
|
||||
break;
|
||||
default:
|
||||
kingsun->stats.tx_errors++;
|
||||
netdev->stats.tx_errors++;
|
||||
netif_start_queue(netdev);
|
||||
}
|
||||
}
|
||||
|
@ -329,12 +329,12 @@ static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
case -EPIPE:
|
||||
break;
|
||||
default:
|
||||
kingsun->stats.tx_errors++;
|
||||
netdev->stats.tx_errors++;
|
||||
netif_start_queue(netdev);
|
||||
}
|
||||
} else {
|
||||
kingsun->stats.tx_packets++;
|
||||
kingsun->stats.tx_bytes += skb->len;
|
||||
netdev->stats.tx_packets++;
|
||||
netdev->stats.tx_bytes += skb->len;
|
||||
|
||||
}
|
||||
|
||||
|
@ -348,9 +348,10 @@ static int ksdazzle_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
static void ksdazzle_rcv_irq(struct urb *urb)
|
||||
{
|
||||
struct ksdazzle_cb *kingsun = urb->context;
|
||||
struct net_device *netdev = kingsun->netdev;
|
||||
|
||||
/* in process of stopping, just drop data */
|
||||
if (!netif_running(kingsun->netdev)) {
|
||||
if (!netif_running(netdev)) {
|
||||
kingsun->receiving = 0;
|
||||
return;
|
||||
}
|
||||
|
@ -368,7 +369,7 @@ static void ksdazzle_rcv_irq(struct urb *urb)
|
|||
unsigned int i;
|
||||
|
||||
for (i = 0; i < urb->actual_length; i++) {
|
||||
async_unwrap_char(kingsun->netdev, &kingsun->stats,
|
||||
async_unwrap_char(netdev, &netdev->stats,
|
||||
&kingsun->rx_unwrap_buff, bytes[i]);
|
||||
}
|
||||
kingsun->receiving =
|
||||
|
@ -561,16 +562,6 @@ static int ksdazzle_net_ioctl(struct net_device *netdev, struct ifreq *rq,
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get device stats (for /proc/net/dev and ifconfig)
|
||||
*/
|
||||
static struct net_device_stats *ksdazzle_net_get_stats(struct net_device
|
||||
*netdev)
|
||||
{
|
||||
struct ksdazzle_cb *kingsun = netdev_priv(netdev);
|
||||
return &kingsun->stats;
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine is called by the USB subsystem for each new device
|
||||
* in the system. We need to check if the device is ours, and in
|
||||
|
@ -696,7 +687,6 @@ static int ksdazzle_probe(struct usb_interface *intf,
|
|||
net->hard_start_xmit = ksdazzle_hard_xmit;
|
||||
net->open = ksdazzle_net_open;
|
||||
net->stop = ksdazzle_net_close;
|
||||
net->get_stats = ksdazzle_net_get_stats;
|
||||
net->do_ioctl = ksdazzle_net_ioctl;
|
||||
|
||||
ret = register_netdev(net);
|
||||
|
|
|
@ -403,8 +403,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
|
|||
if(unlikely(new_len <= 0)) {
|
||||
IRDA_ERROR("%s short frame length %d\n",
|
||||
mcs->netdev->name, new_len);
|
||||
++mcs->stats.rx_errors;
|
||||
++mcs->stats.rx_length_errors;
|
||||
++mcs->netdev->stats.rx_errors;
|
||||
++mcs->netdev->stats.rx_length_errors;
|
||||
return;
|
||||
}
|
||||
fcs = 0;
|
||||
|
@ -413,14 +413,14 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
|
|||
if(fcs != GOOD_FCS) {
|
||||
IRDA_ERROR("crc error calc 0x%x len %d\n",
|
||||
fcs, new_len);
|
||||
mcs->stats.rx_errors++;
|
||||
mcs->stats.rx_crc_errors++;
|
||||
mcs->netdev->stats.rx_errors++;
|
||||
mcs->netdev->stats.rx_crc_errors++;
|
||||
return;
|
||||
}
|
||||
|
||||
skb = dev_alloc_skb(new_len + 1);
|
||||
if(unlikely(!skb)) {
|
||||
++mcs->stats.rx_dropped;
|
||||
++mcs->netdev->stats.rx_dropped;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -433,8 +433,8 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
|
|||
|
||||
netif_rx(skb);
|
||||
|
||||
mcs->stats.rx_packets++;
|
||||
mcs->stats.rx_bytes += new_len;
|
||||
mcs->netdev->stats.rx_packets++;
|
||||
mcs->netdev->stats.rx_bytes += new_len;
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -458,22 +458,22 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
|
|||
if(unlikely(new_len <= 0)) {
|
||||
IRDA_ERROR("%s short frame length %d\n",
|
||||
mcs->netdev->name, new_len);
|
||||
++mcs->stats.rx_errors;
|
||||
++mcs->stats.rx_length_errors;
|
||||
++mcs->netdev->stats.rx_errors;
|
||||
++mcs->netdev->stats.rx_length_errors;
|
||||
return;
|
||||
}
|
||||
|
||||
fcs = ~(crc32_le(~0, buf, new_len));
|
||||
if(fcs != get_unaligned_le32(buf + new_len)) {
|
||||
IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
|
||||
mcs->stats.rx_errors++;
|
||||
mcs->stats.rx_crc_errors++;
|
||||
mcs->netdev->stats.rx_errors++;
|
||||
mcs->netdev->stats.rx_crc_errors++;
|
||||
return;
|
||||
}
|
||||
|
||||
skb = dev_alloc_skb(new_len + 1);
|
||||
if(unlikely(!skb)) {
|
||||
++mcs->stats.rx_dropped;
|
||||
++mcs->netdev->stats.rx_dropped;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -486,8 +486,8 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
|
|||
|
||||
netif_rx(skb);
|
||||
|
||||
mcs->stats.rx_packets++;
|
||||
mcs->stats.rx_bytes += new_len;
|
||||
mcs->netdev->stats.rx_packets++;
|
||||
mcs->netdev->stats.rx_bytes += new_len;
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -756,14 +756,6 @@ static int mcs_net_open(struct net_device *netdev)
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/* Get device stats for /proc/net/dev and ifconfig */
|
||||
static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev)
|
||||
{
|
||||
struct mcs_cb *mcs = netdev_priv(netdev);
|
||||
return &mcs->stats;
|
||||
}
|
||||
|
||||
/* Receive callback function. */
|
||||
static void mcs_receive_irq(struct urb *urb)
|
||||
{
|
||||
|
@ -786,14 +778,14 @@ static void mcs_receive_irq(struct urb *urb)
|
|||
*/
|
||||
/* SIR speed */
|
||||
if(mcs->speed < 576000) {
|
||||
async_unwrap_char(mcs->netdev, &mcs->stats,
|
||||
async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
|
||||
&mcs->rx_buff, 0xc0);
|
||||
|
||||
for (i = 0; i < urb->actual_length; i++)
|
||||
async_unwrap_char(mcs->netdev, &mcs->stats,
|
||||
async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
|
||||
&mcs->rx_buff, bytes[i]);
|
||||
|
||||
async_unwrap_char(mcs->netdev, &mcs->stats,
|
||||
async_unwrap_char(mcs->netdev, &mcs->netdev->stats,
|
||||
&mcs->rx_buff, 0xc1);
|
||||
}
|
||||
/* MIR speed */
|
||||
|
@ -868,12 +860,12 @@ static int mcs_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
case -EPIPE:
|
||||
break;
|
||||
default:
|
||||
mcs->stats.tx_errors++;
|
||||
mcs->netdev->stats.tx_errors++;
|
||||
netif_start_queue(ndev);
|
||||
}
|
||||
} else {
|
||||
mcs->stats.tx_packets++;
|
||||
mcs->stats.tx_bytes += skb->len;
|
||||
mcs->netdev->stats.tx_packets++;
|
||||
mcs->netdev->stats.tx_bytes += skb->len;
|
||||
}
|
||||
|
||||
dev_kfree_skb(skb);
|
||||
|
@ -931,7 +923,6 @@ static int mcs_probe(struct usb_interface *intf,
|
|||
ndev->hard_start_xmit = mcs_hard_xmit;
|
||||
ndev->open = mcs_net_open;
|
||||
ndev->stop = mcs_net_close;
|
||||
ndev->get_stats = mcs_net_get_stats;
|
||||
ndev->do_ioctl = mcs_net_ioctl;
|
||||
|
||||
if (!intf->cur_altsetting)
|
||||
|
|
|
@ -104,7 +104,6 @@ struct mcs_cb {
|
|||
struct usb_device *usbdev; /* init: probe_irda */
|
||||
struct net_device *netdev; /* network layer */
|
||||
struct irlap_cb *irlap; /* The link layer we are binded to */
|
||||
struct net_device_stats stats; /* network statistics */
|
||||
struct qos_info qos;
|
||||
unsigned int speed; /* Current speed */
|
||||
unsigned int new_speed; /* new speed */
|
||||
|
@ -154,7 +153,6 @@ static int mcs_speed_change(struct mcs_cb *mcs);
|
|||
static int mcs_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd);
|
||||
static int mcs_net_close(struct net_device *netdev);
|
||||
static int mcs_net_open(struct net_device *netdev);
|
||||
static struct net_device_stats *mcs_net_get_stats(struct net_device *netdev);
|
||||
|
||||
static void mcs_receive_irq(struct urb *urb);
|
||||
static void mcs_send_irq(struct urb *urb);
|
||||
|
|
|
@ -185,7 +185,6 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id);
|
|||
static int nsc_ircc_net_open(struct net_device *dev);
|
||||
static int nsc_ircc_net_close(struct net_device *dev);
|
||||
static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
|
||||
static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev);
|
||||
|
||||
/* Globals */
|
||||
static int pnp_registered;
|
||||
|
@ -446,7 +445,6 @@ static int __init nsc_ircc_open(chipio_t *info)
|
|||
dev->open = nsc_ircc_net_open;
|
||||
dev->stop = nsc_ircc_net_close;
|
||||
dev->do_ioctl = nsc_ircc_net_ioctl;
|
||||
dev->get_stats = nsc_ircc_net_get_stats;
|
||||
|
||||
err = register_netdev(dev);
|
||||
if (err) {
|
||||
|
@ -1401,7 +1399,7 @@ static int nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
|
|||
self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
|
||||
self->tx_buff.truesize);
|
||||
|
||||
self->stats.tx_bytes += self->tx_buff.len;
|
||||
dev->stats.tx_bytes += self->tx_buff.len;
|
||||
|
||||
/* Add interrupt on tx low level (will fire immediately) */
|
||||
switch_bank(iobase, BANK0);
|
||||
|
@ -1473,7 +1471,7 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
|
|||
self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
|
||||
self->tx_fifo.tail += skb->len;
|
||||
|
||||
self->stats.tx_bytes += skb->len;
|
||||
dev->stats.tx_bytes += skb->len;
|
||||
|
||||
skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start,
|
||||
skb->len);
|
||||
|
@ -1652,13 +1650,13 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
|
|||
|
||||
/* Check for underrrun! */
|
||||
if (inb(iobase+ASCR) & ASCR_TXUR) {
|
||||
self->stats.tx_errors++;
|
||||
self->stats.tx_fifo_errors++;
|
||||
self->netdev->stats.tx_errors++;
|
||||
self->netdev->stats.tx_fifo_errors++;
|
||||
|
||||
/* Clear bit, by writing 1 into it */
|
||||
outb(ASCR_TXUR, iobase+ASCR);
|
||||
} else {
|
||||
self->stats.tx_packets++;
|
||||
self->netdev->stats.tx_packets++;
|
||||
}
|
||||
|
||||
/* Finished with this frame, so prepare for next */
|
||||
|
@ -1793,28 +1791,28 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
|
|||
if (status & FRM_ST_ERR_MSK) {
|
||||
if (status & FRM_ST_LOST_FR) {
|
||||
/* Add number of lost frames to stats */
|
||||
self->stats.rx_errors += len;
|
||||
self->netdev->stats.rx_errors += len;
|
||||
} else {
|
||||
/* Skip frame */
|
||||
self->stats.rx_errors++;
|
||||
self->netdev->stats.rx_errors++;
|
||||
|
||||
self->rx_buff.data += len;
|
||||
|
||||
if (status & FRM_ST_MAX_LEN)
|
||||
self->stats.rx_length_errors++;
|
||||
self->netdev->stats.rx_length_errors++;
|
||||
|
||||
if (status & FRM_ST_PHY_ERR)
|
||||
self->stats.rx_frame_errors++;
|
||||
self->netdev->stats.rx_frame_errors++;
|
||||
|
||||
if (status & FRM_ST_BAD_CRC)
|
||||
self->stats.rx_crc_errors++;
|
||||
self->netdev->stats.rx_crc_errors++;
|
||||
}
|
||||
/* The errors below can be reported in both cases */
|
||||
if (status & FRM_ST_OVR1)
|
||||
self->stats.rx_fifo_errors++;
|
||||
self->netdev->stats.rx_fifo_errors++;
|
||||
|
||||
if (status & FRM_ST_OVR2)
|
||||
self->stats.rx_fifo_errors++;
|
||||
self->netdev->stats.rx_fifo_errors++;
|
||||
} else {
|
||||
/*
|
||||
* First we must make sure that the frame we
|
||||
|
@ -1863,7 +1861,7 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
|
|||
IRDA_WARNING("%s(), memory squeeze, "
|
||||
"dropping frame.\n",
|
||||
__func__);
|
||||
self->stats.rx_dropped++;
|
||||
self->netdev->stats.rx_dropped++;
|
||||
|
||||
/* Restore bank register */
|
||||
outb(bank, iobase+BSR);
|
||||
|
@ -1889,8 +1887,8 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
|
|||
|
||||
/* Move to next frame */
|
||||
self->rx_buff.data += len;
|
||||
self->stats.rx_bytes += len;
|
||||
self->stats.rx_packets++;
|
||||
self->netdev->stats.rx_bytes += len;
|
||||
self->netdev->stats.rx_packets++;
|
||||
|
||||
skb->dev = self->netdev;
|
||||
skb_reset_mac_header(skb);
|
||||
|
@ -1920,8 +1918,8 @@ static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self)
|
|||
/* Receive all characters in Rx FIFO */
|
||||
do {
|
||||
byte = inb(iobase+RXD);
|
||||
async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
|
||||
byte);
|
||||
async_unwrap_char(self->netdev, &self->netdev->stats,
|
||||
&self->rx_buff, byte);
|
||||
} while (inb(iobase+LSR) & LSR_RXDA); /* Data available */
|
||||
}
|
||||
|
||||
|
@ -1952,7 +1950,7 @@ static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir)
|
|||
self->ier = IER_TXLDL_IE;
|
||||
else {
|
||||
|
||||
self->stats.tx_packets++;
|
||||
self->netdev->stats.tx_packets++;
|
||||
netif_wake_queue(self->netdev);
|
||||
self->ier = IER_TXEMP_IE;
|
||||
}
|
||||
|
@ -2307,13 +2305,6 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
|
||||
{
|
||||
struct nsc_ircc_cb *self = netdev_priv(dev);
|
||||
|
||||
return &self->stats;
|
||||
}
|
||||
|
||||
static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state)
|
||||
{
|
||||
struct nsc_ircc_cb *self = platform_get_drvdata(dev);
|
||||
|
|
|
@ -251,7 +251,6 @@ struct nsc_ircc_cb {
|
|||
struct tx_fifo tx_fifo; /* Info about frames to be transmitted */
|
||||
|
||||
struct net_device *netdev; /* Yes! we are some kind of netdevice */
|
||||
struct net_device_stats stats;
|
||||
|
||||
struct irlap_cb *irlap; /* The link layer we are binded to */
|
||||
struct qos_info qos; /* QoS capabilities for this device */
|
||||
|
|
|
@ -108,7 +108,6 @@ struct pxa_irda {
|
|||
int txdma;
|
||||
int rxdma;
|
||||
|
||||
struct net_device_stats stats;
|
||||
struct irlap_cb *irlap;
|
||||
struct qos_info qos;
|
||||
|
||||
|
@ -258,14 +257,15 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
|
|||
data = STRBR;
|
||||
if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
|
||||
printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
|
||||
si->stats.rx_errors++;
|
||||
dev->stats.rx_errors++;
|
||||
if (lsr & LSR_FE)
|
||||
si->stats.rx_frame_errors++;
|
||||
dev->stats.rx_frame_errors++;
|
||||
if (lsr & LSR_OE)
|
||||
si->stats.rx_fifo_errors++;
|
||||
dev->stats.rx_fifo_errors++;
|
||||
} else {
|
||||
si->stats.rx_bytes++;
|
||||
async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
|
||||
dev->stats.rx_bytes++;
|
||||
async_unwrap_char(dev, &dev->stats,
|
||||
&si->rx_buff, data);
|
||||
}
|
||||
lsr = STLSR;
|
||||
}
|
||||
|
@ -277,8 +277,8 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
|
|||
|
||||
case 0x0C: /* Character Timeout Indication */
|
||||
do {
|
||||
si->stats.rx_bytes++;
|
||||
async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
|
||||
dev->stats.rx_bytes++;
|
||||
async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR);
|
||||
} while (STLSR & LSR_DR);
|
||||
si->last_oscr = OSCR;
|
||||
break;
|
||||
|
@ -290,9 +290,8 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
if (si->tx_buff.len == 0) {
|
||||
si->stats.tx_packets++;
|
||||
si->stats.tx_bytes += si->tx_buff.data -
|
||||
si->tx_buff.head;
|
||||
dev->stats.tx_packets++;
|
||||
dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;
|
||||
|
||||
/* We need to ensure that the transmitter has finished. */
|
||||
while ((STLSR & LSR_TEMT) == 0)
|
||||
|
@ -343,10 +342,10 @@ static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
|
|||
DCSR(channel) = dcsr & ~DCSR_RUN;
|
||||
|
||||
if (dcsr & DCSR_ENDINTR) {
|
||||
si->stats.tx_packets++;
|
||||
si->stats.tx_bytes += si->dma_tx_buff_len;
|
||||
dev->stats.tx_packets++;
|
||||
dev->stats.tx_bytes += si->dma_tx_buff_len;
|
||||
} else {
|
||||
si->stats.tx_errors++;
|
||||
dev->stats.tx_errors++;
|
||||
}
|
||||
|
||||
while (ICSR1 & ICSR1_TBY)
|
||||
|
@ -392,14 +391,14 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
|
|||
data = ICDR;
|
||||
|
||||
if (stat & (ICSR1_CRE | ICSR1_ROR)) {
|
||||
si->stats.rx_errors++;
|
||||
dev->stats.rx_errors++;
|
||||
if (stat & ICSR1_CRE) {
|
||||
printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
|
||||
si->stats.rx_crc_errors++;
|
||||
dev->stats.rx_crc_errors++;
|
||||
}
|
||||
if (stat & ICSR1_ROR) {
|
||||
printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
|
||||
si->stats.rx_over_errors++;
|
||||
dev->stats.rx_over_errors++;
|
||||
}
|
||||
} else {
|
||||
si->dma_rx_buff[len++] = data;
|
||||
|
@ -415,14 +414,14 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
|
|||
|
||||
if (icsr0 & ICSR0_FRE) {
|
||||
printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
|
||||
si->stats.rx_dropped++;
|
||||
dev->stats.rx_dropped++;
|
||||
return;
|
||||
}
|
||||
|
||||
skb = alloc_skb(len+1,GFP_ATOMIC);
|
||||
if (!skb) {
|
||||
printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
|
||||
si->stats.rx_dropped++;
|
||||
dev->stats.rx_dropped++;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -437,8 +436,8 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
|
|||
skb->protocol = htons(ETH_P_IRDA);
|
||||
netif_rx(skb);
|
||||
|
||||
si->stats.rx_packets++;
|
||||
si->stats.rx_bytes += len;
|
||||
dev->stats.rx_packets++;
|
||||
dev->stats.rx_bytes += len;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -457,10 +456,10 @@ static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
|
|||
if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
|
||||
if (icsr0 & ICSR0_FRE) {
|
||||
printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
|
||||
si->stats.rx_frame_errors++;
|
||||
dev->stats.rx_frame_errors++;
|
||||
} else {
|
||||
printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
|
||||
si->stats.rx_errors++;
|
||||
dev->stats.rx_errors++;
|
||||
}
|
||||
ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
|
||||
}
|
||||
|
@ -589,12 +588,6 @@ static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
|
||||
{
|
||||
struct pxa_irda *si = netdev_priv(dev);
|
||||
return &si->stats;
|
||||
}
|
||||
|
||||
static void pxa_irda_startup(struct pxa_irda *si)
|
||||
{
|
||||
/* Disable STUART interrupts */
|
||||
|
@ -857,7 +850,6 @@ static int pxa_irda_probe(struct platform_device *pdev)
|
|||
dev->open = pxa_irda_start;
|
||||
dev->stop = pxa_irda_stop;
|
||||
dev->do_ioctl = pxa_irda_ioctl;
|
||||
dev->get_stats = pxa_irda_stats;
|
||||
|
||||
irda_init_max_qos_capabilies(&si->qos);
|
||||
|
||||
|
|
|
@ -60,7 +60,6 @@ struct sa1100_irda {
|
|||
dma_regs_t *txdma;
|
||||
dma_regs_t *rxdma;
|
||||
|
||||
struct net_device_stats stats;
|
||||
struct device *dev;
|
||||
struct irda_platform_data *pdata;
|
||||
struct irlap_cb *irlap;
|
||||
|
@ -375,13 +374,13 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
|
|||
data = Ser2UTDR;
|
||||
|
||||
if (stat & (UTSR1_FRE | UTSR1_ROR)) {
|
||||
si->stats.rx_errors++;
|
||||
dev->stats.rx_errors++;
|
||||
if (stat & UTSR1_FRE)
|
||||
si->stats.rx_frame_errors++;
|
||||
dev->stats.rx_frame_errors++;
|
||||
if (stat & UTSR1_ROR)
|
||||
si->stats.rx_fifo_errors++;
|
||||
dev->stats.rx_fifo_errors++;
|
||||
} else
|
||||
async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
|
||||
async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);
|
||||
|
||||
status = Ser2UTSR0;
|
||||
}
|
||||
|
@ -396,9 +395,9 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
|
|||
* There are at least 4 bytes in the FIFO. Read 3 bytes
|
||||
* and leave the rest to the block below.
|
||||
*/
|
||||
async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
|
||||
async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
|
||||
async_unwrap_char(dev, &si->stats, &si->rx_buff, Ser2UTDR);
|
||||
async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
|
||||
async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
|
||||
async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
|
||||
}
|
||||
|
||||
if (status & (UTSR0_RFS | UTSR0_RID)) {
|
||||
|
@ -406,7 +405,7 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
|
|||
* Fifo contains more than 1 character.
|
||||
*/
|
||||
do {
|
||||
async_unwrap_char(dev, &si->stats, &si->rx_buff,
|
||||
async_unwrap_char(dev, &dev->stats, &si->rx_buff,
|
||||
Ser2UTDR);
|
||||
} while (Ser2UTSR1 & UTSR1_RNE);
|
||||
|
||||
|
@ -422,8 +421,8 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
|
|||
} while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
|
||||
|
||||
if (si->tx_buff.len == 0) {
|
||||
si->stats.tx_packets++;
|
||||
si->stats.tx_bytes += si->tx_buff.data -
|
||||
dev->stats.tx_packets++;
|
||||
dev->stats.tx_bytes += si->tx_buff.data -
|
||||
si->tx_buff.head;
|
||||
|
||||
/*
|
||||
|
@ -482,11 +481,11 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
|
|||
data = Ser2HSDR;
|
||||
|
||||
if (stat & (HSSR1_CRE | HSSR1_ROR)) {
|
||||
si->stats.rx_errors++;
|
||||
dev->stats.rx_errors++;
|
||||
if (stat & HSSR1_CRE)
|
||||
si->stats.rx_crc_errors++;
|
||||
dev->stats.rx_crc_errors++;
|
||||
if (stat & HSSR1_ROR)
|
||||
si->stats.rx_frame_errors++;
|
||||
dev->stats.rx_frame_errors++;
|
||||
} else
|
||||
skb->data[len++] = data;
|
||||
|
||||
|
@ -505,8 +504,8 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
|
|||
skb->dev = dev;
|
||||
skb_reset_mac_header(skb);
|
||||
skb->protocol = htons(ETH_P_IRDA);
|
||||
si->stats.rx_packets++;
|
||||
si->stats.rx_bytes += len;
|
||||
dev->stats.rx_packets++;
|
||||
dev->stats.rx_bytes += len;
|
||||
|
||||
/*
|
||||
* Before we pass the buffer up, allocate a new one.
|
||||
|
@ -545,10 +544,10 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
|
|||
* from the fifo.
|
||||
*/
|
||||
if (Ser2HSSR0 & (HSSR0_FRE | HSSR0_RAB)) {
|
||||
si->stats.rx_errors++;
|
||||
dev->stats.rx_errors++;
|
||||
|
||||
if (Ser2HSSR0 & HSSR0_FRE)
|
||||
si->stats.rx_frame_errors++;
|
||||
dev->stats.rx_frame_errors++;
|
||||
|
||||
/*
|
||||
* Clear out the DMA...
|
||||
|
@ -633,8 +632,8 @@ static void sa1100_irda_txdma_irq(void *id)
|
|||
*/
|
||||
if (skb) {
|
||||
dma_unmap_single(si->dev, si->txbuf_dma, skb->len, DMA_TO_DEVICE);
|
||||
si->stats.tx_packets ++;
|
||||
si->stats.tx_bytes += skb->len;
|
||||
dev->stats.tx_packets ++;
|
||||
dev->stats.tx_bytes += skb->len;
|
||||
dev_kfree_skb_irq(skb);
|
||||
}
|
||||
|
||||
|
@ -762,12 +761,6 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static struct net_device_stats *sa1100_irda_stats(struct net_device *dev)
|
||||
{
|
||||
struct sa1100_irda *si = netdev_priv(dev);
|
||||
return &si->stats;
|
||||
}
|
||||
|
||||
static int sa1100_irda_start(struct net_device *dev)
|
||||
{
|
||||
struct sa1100_irda *si = netdev_priv(dev);
|
||||
|
@ -924,7 +917,6 @@ static int sa1100_irda_probe(struct platform_device *pdev)
|
|||
dev->open = sa1100_irda_start;
|
||||
dev->stop = sa1100_irda_stop;
|
||||
dev->do_ioctl = sa1100_irda_ioctl;
|
||||
dev->get_stats = sa1100_irda_stats;
|
||||
dev->irq = IRQ_Ser2ICP;
|
||||
|
||||
irda_init_max_qos_capabilies(&si->qos);
|
||||
|
|
|
@ -160,7 +160,6 @@ static inline int sirdev_schedule_mode(struct sir_dev *dev, int mode)
|
|||
|
||||
struct sir_dev {
|
||||
struct net_device *netdev;
|
||||
struct net_device_stats stats;
|
||||
|
||||
struct irlap_cb *irlap;
|
||||
|
||||
|
|
|
@ -455,8 +455,8 @@ void sirdev_write_complete(struct sir_dev *dev)
|
|||
if ((skb=dev->tx_skb) != NULL) {
|
||||
dev->tx_skb = NULL;
|
||||
dev_kfree_skb_any(skb);
|
||||
dev->stats.tx_errors++;
|
||||
dev->stats.tx_dropped++;
|
||||
dev->netdev->stats.tx_errors++;
|
||||
dev->netdev->stats.tx_dropped++;
|
||||
}
|
||||
dev->tx_buff.len = 0;
|
||||
}
|
||||
|
@ -493,8 +493,8 @@ void sirdev_write_complete(struct sir_dev *dev)
|
|||
|
||||
if ((skb=dev->tx_skb) != NULL) {
|
||||
dev->tx_skb = NULL;
|
||||
dev->stats.tx_packets++;
|
||||
dev->stats.tx_bytes += skb->len;
|
||||
dev->netdev->stats.tx_packets++;
|
||||
dev->netdev->stats.tx_bytes += skb->len;
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
|
||||
|
@ -548,7 +548,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
|
|||
* just update stats and set media busy
|
||||
*/
|
||||
irda_device_set_media_busy(dev->netdev, TRUE);
|
||||
dev->stats.rx_dropped++;
|
||||
dev->netdev->stats.rx_dropped++;
|
||||
IRDA_DEBUG(0, "%s; rx-drop: %zd\n", __func__, count);
|
||||
return 0;
|
||||
}
|
||||
|
@ -557,7 +557,7 @@ int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
|
|||
if (likely(atomic_read(&dev->enable_rx))) {
|
||||
while (count--)
|
||||
/* Unwrap and destuff one byte */
|
||||
async_unwrap_char(dev->netdev, &dev->stats,
|
||||
async_unwrap_char(dev->netdev, &dev->netdev->stats,
|
||||
&dev->rx_buff, *cp++);
|
||||
} else {
|
||||
while (count--) {
|
||||
|
@ -582,13 +582,6 @@ EXPORT_SYMBOL(sirdev_receive);
|
|||
|
||||
/* callbacks from network layer */
|
||||
|
||||
static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
|
||||
{
|
||||
struct sir_dev *dev = netdev_priv(ndev);
|
||||
|
||||
return (dev) ? &dev->stats : NULL;
|
||||
}
|
||||
|
||||
static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
{
|
||||
struct sir_dev *dev = netdev_priv(ndev);
|
||||
|
@ -654,7 +647,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
*/
|
||||
atomic_set(&dev->enable_rx, 0);
|
||||
if (unlikely(sirdev_is_receiving(dev)))
|
||||
dev->stats.collisions++;
|
||||
dev->netdev->stats.collisions++;
|
||||
|
||||
actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
|
||||
|
||||
|
@ -669,8 +662,8 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
|
|||
IRDA_ERROR("%s: drv->do_write failed (%d)\n",
|
||||
__func__, actual);
|
||||
dev_kfree_skb_any(skb);
|
||||
dev->stats.tx_errors++;
|
||||
dev->stats.tx_dropped++;
|
||||
dev->netdev->stats.tx_errors++;
|
||||
dev->netdev->stats.tx_dropped++;
|
||||
netif_wake_queue(ndev);
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->tx_lock, flags);
|
||||
|
@ -918,7 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
|
|||
ndev->hard_start_xmit = sirdev_hard_xmit;
|
||||
ndev->open = sirdev_open;
|
||||
ndev->stop = sirdev_close;
|
||||
ndev->get_stats = sirdev_get_stats;
|
||||
ndev->do_ioctl = sirdev_ioctl;
|
||||
|
||||
if (register_netdev(ndev)) {
|
||||
|
|
|
@ -150,7 +150,6 @@ struct smsc_chip_address {
|
|||
/* Private data for each instance */
|
||||
struct smsc_ircc_cb {
|
||||
struct net_device *netdev; /* Yes! we are some kind of netdevice */
|
||||
struct net_device_stats stats;
|
||||
struct irlap_cb *irlap; /* The link layer we are binded to */
|
||||
|
||||
chipio_t io; /* IrDA controller information */
|
||||
|
@ -215,7 +214,6 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cm
|
|||
#if SMSC_IRCC2_C_NET_TIMEOUT
|
||||
static void smsc_ircc_timeout(struct net_device *dev);
|
||||
#endif
|
||||
static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
|
||||
static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
|
||||
static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
|
||||
static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
|
||||
|
@ -529,7 +527,6 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
|
|||
dev->open = smsc_ircc_net_open;
|
||||
dev->stop = smsc_ircc_net_close;
|
||||
dev->do_ioctl = smsc_ircc_net_ioctl;
|
||||
dev->get_stats = smsc_ircc_net_get_stats;
|
||||
|
||||
self = netdev_priv(dev);
|
||||
self->netdev = dev;
|
||||
|
@ -834,13 +831,6 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
|
|||
return ret;
|
||||
}
|
||||
|
||||
static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
|
||||
{
|
||||
struct smsc_ircc_cb *self = netdev_priv(dev);
|
||||
|
||||
return &self->stats;
|
||||
}
|
||||
|
||||
#if SMSC_IRCC2_C_NET_TIMEOUT
|
||||
/*
|
||||
* Function smsc_ircc_timeout (struct net_device *dev)
|
||||
|
@ -920,7 +910,7 @@ static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
|
|||
self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
|
||||
self->tx_buff.truesize);
|
||||
|
||||
self->stats.tx_bytes += self->tx_buff.len;
|
||||
dev->stats.tx_bytes += self->tx_buff.len;
|
||||
|
||||
/* Turn on transmit finished interrupt. Will fire immediately! */
|
||||
outb(UART_IER_THRI, self->io.sir_base + UART_IER);
|
||||
|
@ -1320,16 +1310,16 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
|
|||
/* Check for underrun! */
|
||||
register_bank(iobase, 0);
|
||||
if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
|
||||
self->stats.tx_errors++;
|
||||
self->stats.tx_fifo_errors++;
|
||||
self->netdev->stats.tx_errors++;
|
||||
self->netdev->stats.tx_fifo_errors++;
|
||||
|
||||
/* Reset error condition */
|
||||
register_bank(iobase, 0);
|
||||
outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
|
||||
outb(0x00, iobase + IRCC_MASTER);
|
||||
} else {
|
||||
self->stats.tx_packets++;
|
||||
self->stats.tx_bytes += self->tx_buff.len;
|
||||
self->netdev->stats.tx_packets++;
|
||||
self->netdev->stats.tx_bytes += self->tx_buff.len;
|
||||
}
|
||||
|
||||
/* Check if it's time to change the speed */
|
||||
|
@ -1429,15 +1419,15 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
|
|||
|
||||
/* Look for errors */
|
||||
if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
|
||||
self->stats.rx_errors++;
|
||||
self->netdev->stats.rx_errors++;
|
||||
if (lsr & IRCC_LSR_FRAME_ERROR)
|
||||
self->stats.rx_frame_errors++;
|
||||
self->netdev->stats.rx_frame_errors++;
|
||||
if (lsr & IRCC_LSR_CRC_ERROR)
|
||||
self->stats.rx_crc_errors++;
|
||||
self->netdev->stats.rx_crc_errors++;
|
||||
if (lsr & IRCC_LSR_SIZE_ERROR)
|
||||
self->stats.rx_length_errors++;
|
||||
self->netdev->stats.rx_length_errors++;
|
||||
if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
|
||||
self->stats.rx_length_errors++;
|
||||
self->netdev->stats.rx_length_errors++;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1460,8 +1450,8 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
|
|||
skb_reserve(skb, 1);
|
||||
|
||||
memcpy(skb_put(skb, len), self->rx_buff.data, len);
|
||||
self->stats.rx_packets++;
|
||||
self->stats.rx_bytes += len;
|
||||
self->netdev->stats.rx_packets++;
|
||||
self->netdev->stats.rx_bytes += len;
|
||||
|
||||
skb->dev = self->netdev;
|
||||
skb_reset_mac_header(skb);
|
||||
|
@ -1489,7 +1479,7 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
|
|||
* async_unwrap_char will deliver all found frames
|
||||
*/
|
||||
do {
|
||||
async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
|
||||
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
|
||||
inb(iobase + UART_RX));
|
||||
|
||||
/* Make sure we don't stay here to long */
|
||||
|
@ -1992,7 +1982,7 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
|
|||
/* Tell network layer that we want more frames */
|
||||
netif_wake_queue(self->netdev);
|
||||
}
|
||||
self->stats.tx_packets++;
|
||||
self->netdev->stats.tx_packets++;
|
||||
|
||||
if (self->io.speed <= 115200) {
|
||||
/*
|
||||
|
|
|
@ -164,7 +164,7 @@ struct stir_cb {
|
|||
struct usb_device *usbdev; /* init: probe_irda */
|
||||
struct net_device *netdev; /* network layer */
|
||||
struct irlap_cb *irlap; /* The link layer we are binded to */
|
||||
struct net_device_stats stats; /* network statistics */
|
||||
|
||||
struct qos_info qos;
|
||||
unsigned speed; /* Current speed */
|
||||
|
||||
|
@ -323,16 +323,16 @@ static void fir_eof(struct stir_cb *stir)
|
|||
pr_debug("%s: short frame len %d\n",
|
||||
stir->netdev->name, len);
|
||||
|
||||
++stir->stats.rx_errors;
|
||||
++stir->stats.rx_length_errors;
|
||||
++stir->netdev->stats.rx_errors;
|
||||
++stir->netdev->stats.rx_length_errors;
|
||||
return;
|
||||
}
|
||||
|
||||
fcs = ~(crc32_le(~0, rx_buff->data, len));
|
||||
if (fcs != get_unaligned_le32(rx_buff->data + len)) {
|
||||
pr_debug("crc error calc 0x%x len %d\n", fcs, len);
|
||||
stir->stats.rx_errors++;
|
||||
stir->stats.rx_crc_errors++;
|
||||
stir->netdev->stats.rx_errors++;
|
||||
stir->netdev->stats.rx_crc_errors++;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -340,7 +340,7 @@ static void fir_eof(struct stir_cb *stir)
|
|||
if (len < IRDA_RX_COPY_THRESHOLD) {
|
||||
nskb = dev_alloc_skb(len + 1);
|
||||
if (unlikely(!nskb)) {
|
||||
++stir->stats.rx_dropped;
|
||||
++stir->netdev->stats.rx_dropped;
|
||||
return;
|
||||
}
|
||||
skb_reserve(nskb, 1);
|
||||
|
@ -349,7 +349,7 @@ static void fir_eof(struct stir_cb *stir)
|
|||
} else {
|
||||
nskb = dev_alloc_skb(rx_buff->truesize);
|
||||
if (unlikely(!nskb)) {
|
||||
++stir->stats.rx_dropped;
|
||||
++stir->netdev->stats.rx_dropped;
|
||||
return;
|
||||
}
|
||||
skb_reserve(nskb, 1);
|
||||
|
@ -366,8 +366,8 @@ static void fir_eof(struct stir_cb *stir)
|
|||
|
||||
netif_rx(skb);
|
||||
|
||||
stir->stats.rx_packets++;
|
||||
stir->stats.rx_bytes += len;
|
||||
stir->netdev->stats.rx_packets++;
|
||||
stir->netdev->stats.rx_bytes += len;
|
||||
|
||||
rx_buff->data = rx_buff->head;
|
||||
rx_buff->len = 0;
|
||||
|
@ -437,7 +437,7 @@ static void stir_fir_chars(struct stir_cb *stir,
|
|||
if (unlikely(rx_buff->len >= rx_buff->truesize)) {
|
||||
pr_debug("%s: fir frame exceeds %d\n",
|
||||
stir->netdev->name, rx_buff->truesize);
|
||||
++stir->stats.rx_over_errors;
|
||||
++stir->netdev->stats.rx_over_errors;
|
||||
goto error_recovery;
|
||||
}
|
||||
|
||||
|
@ -445,10 +445,10 @@ static void stir_fir_chars(struct stir_cb *stir,
|
|||
continue;
|
||||
|
||||
frame_error:
|
||||
++stir->stats.rx_frame_errors;
|
||||
++stir->netdev->stats.rx_frame_errors;
|
||||
|
||||
error_recovery:
|
||||
++stir->stats.rx_errors;
|
||||
++stir->netdev->stats.rx_errors;
|
||||
rx_buff->state = OUTSIDE_FRAME;
|
||||
rx_buff->in_frame = FALSE;
|
||||
}
|
||||
|
@ -461,7 +461,7 @@ static void stir_sir_chars(struct stir_cb *stir,
|
|||
int i;
|
||||
|
||||
for (i = 0; i < len; i++)
|
||||
async_unwrap_char(stir->netdev, &stir->stats,
|
||||
async_unwrap_char(stir->netdev, &stir->netdev->stats,
|
||||
&stir->rx_buff, bytes[i]);
|
||||
}
|
||||
|
||||
|
@ -692,7 +692,7 @@ static void receive_stop(struct stir_cb *stir)
|
|||
usb_kill_urb(stir->rx_urb);
|
||||
|
||||
if (stir->rx_buff.in_frame)
|
||||
stir->stats.collisions++;
|
||||
stir->netdev->stats.collisions++;
|
||||
}
|
||||
/*
|
||||
* Wrap data in socket buffer and send it.
|
||||
|
@ -718,15 +718,15 @@ static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
|
|||
if (!first_frame)
|
||||
fifo_txwait(stir, wraplen);
|
||||
|
||||
stir->stats.tx_packets++;
|
||||
stir->stats.tx_bytes += skb->len;
|
||||
stir->netdev->stats.tx_packets++;
|
||||
stir->netdev->stats.tx_bytes += skb->len;
|
||||
stir->netdev->trans_start = jiffies;
|
||||
pr_debug("send %d (%d)\n", skb->len, wraplen);
|
||||
|
||||
if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
|
||||
stir->io_buf, wraplen,
|
||||
NULL, TRANSMIT_TIMEOUT))
|
||||
stir->stats.tx_errors++;
|
||||
stir->netdev->stats.tx_errors++;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1007,15 +1007,6 @@ static int stir_net_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get device stats (for /proc/net/dev and ifconfig)
|
||||
*/
|
||||
static struct net_device_stats *stir_net_get_stats(struct net_device *netdev)
|
||||
{
|
||||
struct stir_cb *stir = netdev_priv(netdev);
|
||||
return &stir->stats;
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine is called by the USB subsystem for each new device
|
||||
* in the system. We need to check if the device is ours, and in
|
||||
|
@ -1066,7 +1057,6 @@ static int stir_probe(struct usb_interface *intf,
|
|||
net->hard_start_xmit = stir_hard_xmit;
|
||||
net->open = stir_net_open;
|
||||
net->stop = stir_net_close;
|
||||
net->get_stats = stir_net_get_stats;
|
||||
net->do_ioctl = stir_net_ioctl;
|
||||
|
||||
ret = register_netdev(net);
|
||||
|
|
|
@ -101,8 +101,6 @@ static int via_ircc_net_open(struct net_device *dev);
|
|||
static int via_ircc_net_close(struct net_device *dev);
|
||||
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
|
||||
int cmd);
|
||||
static struct net_device_stats *via_ircc_net_get_stats(struct net_device
|
||||
*dev);
|
||||
static void via_ircc_change_dongle_speed(int iobase, int speed,
|
||||
int dongle_id);
|
||||
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
|
||||
|
@ -434,7 +432,6 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
|
|||
dev->open = via_ircc_net_open;
|
||||
dev->stop = via_ircc_net_close;
|
||||
dev->do_ioctl = via_ircc_net_ioctl;
|
||||
dev->get_stats = via_ircc_net_get_stats;
|
||||
|
||||
err = register_netdev(dev);
|
||||
if (err)
|
||||
|
@ -855,7 +852,7 @@ static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
|
|||
async_wrap_skb(skb, self->tx_buff.data,
|
||||
self->tx_buff.truesize);
|
||||
|
||||
self->stats.tx_bytes += self->tx_buff.len;
|
||||
dev->stats.tx_bytes += self->tx_buff.len;
|
||||
/* Send this frame with old speed */
|
||||
SetBaudRate(iobase, self->io.speed);
|
||||
SetPulseWidth(iobase, 12);
|
||||
|
@ -921,7 +918,7 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
|
|||
self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
|
||||
|
||||
self->tx_fifo.tail += skb->len;
|
||||
self->stats.tx_bytes += skb->len;
|
||||
dev->stats.tx_bytes += skb->len;
|
||||
skb_copy_from_linear_data(skb,
|
||||
self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
|
||||
self->tx_fifo.len++;
|
||||
|
@ -990,12 +987,12 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
|
|||
/* Clear bit, by writing 1 into it */
|
||||
Tx_status = GetTXStatus(iobase);
|
||||
if (Tx_status & 0x08) {
|
||||
self->stats.tx_errors++;
|
||||
self->stats.tx_fifo_errors++;
|
||||
self->netdev->stats.tx_errors++;
|
||||
self->netdev->stats.tx_fifo_errors++;
|
||||
hwreset(self);
|
||||
// how to clear underrrun ?
|
||||
} else {
|
||||
self->stats.tx_packets++;
|
||||
self->netdev->stats.tx_packets++;
|
||||
ResetChip(iobase, 3);
|
||||
ResetChip(iobase, 4);
|
||||
}
|
||||
|
@ -1119,8 +1116,8 @@ static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
|
|||
}
|
||||
// Move to next frame
|
||||
self->rx_buff.data += len;
|
||||
self->stats.rx_bytes += len;
|
||||
self->stats.rx_packets++;
|
||||
self->netdev->stats.rx_bytes += len;
|
||||
self->netdev->stats.rx_packets++;
|
||||
skb->dev = self->netdev;
|
||||
skb_reset_mac_header(skb);
|
||||
skb->protocol = htons(ETH_P_IRDA);
|
||||
|
@ -1180,7 +1177,7 @@ F01_E */
|
|||
*/
|
||||
if ((skb == NULL) || (skb->data == NULL)
|
||||
|| (self->rx_buff.data == NULL) || (len < 6)) {
|
||||
self->stats.rx_dropped++;
|
||||
self->netdev->stats.rx_dropped++;
|
||||
return TRUE;
|
||||
}
|
||||
skb_reserve(skb, 1);
|
||||
|
@@ -1192,8 +1189,8 @@ F01_E */

// Move to next frame
self->rx_buff.data += len;
self->stats.rx_bytes += len;
self->stats.rx_packets++;
self->netdev->stats.rx_bytes += len;
self->netdev->stats.rx_packets++;
skb->dev = self->netdev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
@@ -1220,13 +1217,13 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

if ((len - 4) < 2) {
self->stats.rx_dropped++;
self->netdev->stats.rx_dropped++;
return FALSE;
}

skb = dev_alloc_skb(len + 1);
if (skb == NULL) {
self->stats.rx_dropped++;
self->netdev->stats.rx_dropped++;
return FALSE;
}
skb_reserve(skb, 1);
@@ -1238,8 +1235,8 @@ static int upload_rxdata(struct via_ircc_cb *self, int iobase)
st_fifo->tail = 0;
// Move to next frame
self->rx_buff.data += len;
self->stats.rx_bytes += len;
self->stats.rx_packets++;
self->netdev->stats.rx_bytes += len;
self->netdev->stats.rx_packets++;
skb->dev = self->netdev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
@@ -1295,7 +1292,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
*/
if ((skb == NULL) || (skb->data == NULL)
|| (self->rx_buff.data == NULL) || (len < 6)) {
self->stats.rx_dropped++;
self->netdev->stats.rx_dropped++;
continue;
}
skb_reserve(skb, 1);
@@ -1307,8 +1304,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)

// Move to next frame
self->rx_buff.data += len;
self->stats.rx_bytes += len;
self->stats.rx_packets++;
self->netdev->stats.rx_bytes += len;
self->netdev->stats.rx_packets++;
skb->dev = self->netdev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
@@ -1523,7 +1520,7 @@ static int via_ircc_net_open(struct net_device *dev)

IRDA_ASSERT(dev != NULL, return -1;);
self = netdev_priv(dev);
self->stats.rx_packets = 0;
dev->stats.rx_packets = 0;
IRDA_ASSERT(self != NULL, return 0;);
iobase = self->io.fir_base;
if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
@@ -1660,14 +1657,6 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
return ret;
}

static struct net_device_stats *via_ircc_net_get_stats(struct net_device
*dev)
{
struct via_ircc_cb *self = netdev_priv(dev);

return &self->stats;
}

MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");
@@ -95,7 +95,6 @@ struct via_ircc_cb {
struct tx_fifo tx_fifo; /* Info about frames to be transmitted */

struct net_device *netdev; /* Yes! we are some kind of netdevice */
struct net_device_stats stats;

struct irlap_cb *irlap; /* The link layer we are binded to */
struct qos_info qos; /* QoS capabilities for this device */
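The via-ircc hunks above all apply the same two-part pattern: the driver-private net_device_stats field is dropped from the control block, and every counter update goes through the net_device the control block already points at. A hedged sketch of that pattern, with illustrative names that are not taken from this patch:

#include <linux/netdevice.h>

/* Stand-in for a driver control block such as struct via_ircc_cb. */
struct example_cb {
	struct net_device *netdev;	/* back-pointer the driver already keeps */
	/* struct net_device_stats stats;   <- field removed by the conversion */
};

/* Counter updates now target the stats embedded in the net_device. */
static void example_rx_frame_done(struct example_cb *self, unsigned int len)
{
	self->netdev->stats.rx_packets++;
	self->netdev->stats.rx_bytes += len;
}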
@@ -291,14 +291,14 @@ static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);

seq_printf(seq, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
idev->stats.rx_dropped);
ndev->stats.rx_packets, ndev->stats.rx_bytes, ndev->stats.rx_errors,
ndev->stats.rx_dropped);
seq_printf(seq, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
idev->stats.rx_over_errors, idev->stats.rx_length_errors,
idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
ndev->stats.rx_over_errors, ndev->stats.rx_length_errors,
ndev->stats.rx_frame_errors, ndev->stats.rx_crc_errors);
seq_printf(seq, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
ndev->stats.tx_packets, ndev->stats.tx_bytes, ndev->stats.tx_errors,
ndev->stats.tx_dropped, ndev->stats.tx_fifo_errors);

}
@@ -651,21 +651,21 @@ static void vlsi_rx_interrupt(struct net_device *ndev)

if (ret < 0) {
ret = -ret;
idev->stats.rx_errors++;
ndev->stats.rx_errors++;
if (ret & VLSI_RX_DROP)
idev->stats.rx_dropped++;
ndev->stats.rx_dropped++;
if (ret & VLSI_RX_OVER)
idev->stats.rx_over_errors++;
ndev->stats.rx_over_errors++;
if (ret & VLSI_RX_LENGTH)
idev->stats.rx_length_errors++;
ndev->stats.rx_length_errors++;
if (ret & VLSI_RX_FRAME)
idev->stats.rx_frame_errors++;
ndev->stats.rx_frame_errors++;
if (ret & VLSI_RX_CRC)
idev->stats.rx_crc_errors++;
ndev->stats.rx_crc_errors++;
}
else if (ret > 0) {
idev->stats.rx_packets++;
idev->stats.rx_bytes += ret;
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += ret;
}
}
@@ -686,6 +686,7 @@ static void vlsi_rx_interrupt(struct net_device *ndev)

static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
{
struct net_device *ndev = pci_get_drvdata(idev->pdev);
struct vlsi_ring *r = idev->rx_ring;
struct ring_descr *rd;
int ret;
@@ -711,21 +712,21 @@ static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)

if (ret < 0) {
ret = -ret;
idev->stats.rx_errors++;
ndev->stats.rx_errors++;
if (ret & VLSI_RX_DROP)
idev->stats.rx_dropped++;
ndev->stats.rx_dropped++;
if (ret & VLSI_RX_OVER)
idev->stats.rx_over_errors++;
ndev->stats.rx_over_errors++;
if (ret & VLSI_RX_LENGTH)
idev->stats.rx_length_errors++;
ndev->stats.rx_length_errors++;
if (ret & VLSI_RX_FRAME)
idev->stats.rx_frame_errors++;
ndev->stats.rx_frame_errors++;
if (ret & VLSI_RX_CRC)
idev->stats.rx_crc_errors++;
ndev->stats.rx_crc_errors++;
}
else if (ret > 0) {
idev->stats.rx_packets++;
idev->stats.rx_bytes += ret;
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += ret;
}
}
}
@@ -1050,8 +1051,8 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
drop:
IRDA_WARNING("%s: dropping packet - %s\n", __func__, msg);
dev_kfree_skb_any(skb);
idev->stats.tx_errors++;
idev->stats.tx_dropped++;
ndev->stats.tx_errors++;
ndev->stats.tx_dropped++;
/* Don't even think about returning NET_XMIT_DROP (=1) here!
 * In fact any retval!=0 causes the packet scheduler to requeue the
 * packet for later retry of transmission - which isn't exactly
@@ -1078,15 +1079,15 @@ static void vlsi_tx_interrupt(struct net_device *ndev)

if (ret < 0) {
ret = -ret;
idev->stats.tx_errors++;
ndev->stats.tx_errors++;
if (ret & VLSI_TX_DROP)
idev->stats.tx_dropped++;
ndev->stats.tx_dropped++;
if (ret & VLSI_TX_FIFO)
idev->stats.tx_fifo_errors++;
ndev->stats.tx_fifo_errors++;
}
else if (ret > 0){
idev->stats.tx_packets++;
idev->stats.tx_bytes += ret;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += ret;
}
}
@@ -1122,6 +1123,7 @@ static void vlsi_tx_interrupt(struct net_device *ndev)

static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
{
struct net_device *ndev = pci_get_drvdata(idev->pdev);
struct vlsi_ring *r = idev->tx_ring;
struct ring_descr *rd;
int ret;
@@ -1145,15 +1147,15 @@ static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)

if (ret < 0) {
ret = -ret;
idev->stats.tx_errors++;
ndev->stats.tx_errors++;
if (ret & VLSI_TX_DROP)
idev->stats.tx_dropped++;
ndev->stats.tx_dropped++;
if (ret & VLSI_TX_FIFO)
idev->stats.tx_fifo_errors++;
ndev->stats.tx_fifo_errors++;
}
else if (ret > 0){
idev->stats.tx_packets++;
idev->stats.tx_bytes += ret;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += ret;
}
}
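vlsi_unarm_rx() and vlsi_unarm_tx() are only handed the private vlsi_irda_dev_t, so the hunks above first look the net_device back up through the PCI driver data before touching its counters. A minimal hedged sketch of that lookup; the structure and function names are illustrative, and it assumes the net_device was stored with pci_set_drvdata() at probe time, as the vlsi driver does:

#include <linux/netdevice.h>
#include <linux/pci.h>

/* Stand-in for vlsi_irda_dev_t: only the pci_dev back-pointer is assumed. */
struct example_idev {
	struct pci_dev *pdev;
};

static void example_unarm_one(struct example_idev *idev, int ret)
{
	/* recover the net_device that probe stored in the PCI drvdata */
	struct net_device *ndev = pci_get_drvdata(idev->pdev);

	if (ret < 0) {
		ndev->stats.tx_errors++;
	} else if (ret > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += ret;
	}
}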
@@ -1373,13 +1375,6 @@ static int vlsi_stop_hw(vlsi_irda_dev_t *idev)

/**************************************************************/

static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = netdev_priv(ndev);

return &idev->stats;
}

static void vlsi_tx_timeout(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = netdev_priv(ndev);
@@ -1615,7 +1610,6 @@ static int vlsi_irda_init(struct net_device *ndev)

ndev->open = vlsi_open;
ndev->stop = vlsi_close;
ndev->get_stats = vlsi_get_stats;
ndev->hard_start_xmit = vlsi_hard_start_xmit;
ndev->do_ioctl = vlsi_ioctl;
ndev->tx_timeout = vlsi_tx_timeout;
@@ -712,7 +712,6 @@ static inline struct ring_descr *ring_get(struct vlsi_ring *r)

typedef struct vlsi_irda_dev {
struct pci_dev *pdev;
struct net_device_stats stats;

struct irlap_cb *irlap;
@@ -102,7 +102,6 @@ static int w83977af_is_receiving(struct w83977af_ir *self);
static int w83977af_net_open(struct net_device *dev);
static int w83977af_net_close(struct net_device *dev);
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev);

/*
 * Function w83977af_init ()
@@ -237,7 +236,6 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
dev->open = w83977af_net_open;
dev->stop = w83977af_net_close;
dev->do_ioctl = w83977af_net_ioctl;
dev->get_stats = w83977af_net_get_stats;

err = register_netdev(dev);
if (err) {
@@ -702,13 +700,13 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
if (inb(iobase+AUDR) & AUDR_UNDR) {
IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );

self->stats.tx_errors++;
self->stats.tx_fifo_errors++;
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;

/* Clear bit, by writing 1 to it */
outb(AUDR_UNDR, iobase+AUDR);
} else
self->stats.tx_packets++;
self->netdev->stats.tx_packets++;


if (self->new_speed) {
@@ -846,28 +844,28 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)
if (status & FS_FO_ERR_MSK) {
if (status & FS_FO_LST_FR) {
/* Add number of lost frames to stats */
self->stats.rx_errors += len;
self->netdev->stats.rx_errors += len;
} else {
/* Skip frame */
self->stats.rx_errors++;
self->netdev->stats.rx_errors++;

self->rx_buff.data += len;

if (status & FS_FO_MX_LEX)
self->stats.rx_length_errors++;
self->netdev->stats.rx_length_errors++;

if (status & FS_FO_PHY_ERR)
self->stats.rx_frame_errors++;
self->netdev->stats.rx_frame_errors++;

if (status & FS_FO_CRC_ERR)
self->stats.rx_crc_errors++;
self->netdev->stats.rx_crc_errors++;
}
/* The errors below can be reported in both cases */
if (status & FS_FO_RX_OV)
self->stats.rx_fifo_errors++;
self->netdev->stats.rx_fifo_errors++;

if (status & FS_FO_FSF_OV)
self->stats.rx_fifo_errors++;
self->netdev->stats.rx_fifo_errors++;

} else {
/* Check if we have transferred all data to memory */
@@ -917,7 +915,7 @@ static int w83977af_dma_receive_complete(struct w83977af_ir *self)

/* Move to next frame */
self->rx_buff.data += len;
self->stats.rx_packets++;
self->netdev->stats.rx_packets++;

skb->dev = self->netdev;
skb_reset_mac_header(skb);
@@ -951,7 +949,7 @@ static void w83977af_pio_receive(struct w83977af_ir *self)
/* Receive all characters in Rx FIFO */
do {
byte = inb(iobase+RBR);
async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
byte);
} while (inb(iobase+USR) & USR_RDR); /* Data available */
}
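The SIR receive paths keep using the IrDA wrapper helper; the only change is that async_unwrap_char() is now handed the counters embedded in the net_device rather than the driver's private copy. A small hedged sketch of such a call site; the helper function name is illustrative and the header choices are assumptions, not something this patch introduces:

#include <linux/netdevice.h>
#include <net/irda/irda_device.h>	/* iobuff_t */
#include <net/irda/wrapper.h>		/* async_unwrap_char() */

/* Illustrative helper, not from this patch: feed one received SIR byte to
 * the unwrapper and account it against the stats embedded in net_device.
 */
static void example_feed_sir_byte(struct net_device *dev, iobuff_t *rx_buff,
				  __u8 byte)
{
	async_unwrap_char(dev, &dev->stats, rx_buff, byte);
}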
@@ -994,7 +992,7 @@ static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
outb(AUDR_SFEND, iobase+AUDR);
outb(set, iobase+SSR);

self->stats.tx_packets++;
self->netdev->stats.tx_packets++;

/* Feed me more packets */
netif_wake_queue(self->netdev);
@@ -1336,13 +1334,6 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}

static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
{
struct w83977af_ir *self = netdev_priv(dev);

return &self->stats;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");
@@ -172,7 +172,6 @@ struct w83977af_ir {
int tx_len; /* Number of frames in tx_buff */

struct net_device *netdev; /* Yes! we are some kind of netdevice */
struct net_device_stats stats;

struct irlap_cb *irlap; /* The link layer we are binded to */
struct qos_info qos; /* QoS capabilities for this device */