netdev: Make netif_schedule() routines work with netdev_queue objects.
Only plain netif_schedule() remains taking a net_device, mostly as a compatibility item while we transition the rest of these interfaces. Everything else calls netif_schedule_queue() or __netif_schedule(), both of which take a netdev_queue pointer.
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 970565bbad
commit 86d804e10a
6 changed files with 27 additions and 19 deletions
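For orientation, the sketch below is a small, self-contained model of how the entry points relate after this patch: netif_schedule() survives only as a compatibility wrapper that forwards dev->tx_queue, while netif_schedule_queue() and __netif_schedule() operate on a netdev_queue. It is not kernel code; the struct layout, the plain bool standing in for __LINK_STATE_XOFF, and the printf are simplifications invented for illustration.

/*
 * Toy model of the interface after this patch; not kernel code.
 * The bool "xoff" stands in for the __LINK_STATE_XOFF bit and the
 * printf stands in for the real softirq scheduling work.
 */
#include <stdbool.h>
#include <stdio.h>

struct net_device;

struct netdev_queue {
        struct net_device *dev;
};

struct net_device {
        bool xoff;                      /* models __LINK_STATE_XOFF */
        struct netdev_queue tx_queue;   /* single TX queue at this point */
};

static void __netif_schedule(struct netdev_queue *txq)
{
        /* The real function queues the device for the TX softirq. */
        printf("schedule TX queue of dev %p\n", (void *)txq->dev);
}

static void netif_schedule_queue(struct netdev_queue *txq)
{
        if (!txq->dev->xoff)            /* mirrors the !test_bit() check */
                __netif_schedule(txq);
}

/* Compatibility wrapper: still takes a net_device, but everything
 * funnels through the netdev_queue-based helper. */
static void netif_schedule(struct net_device *dev)
{
        netif_schedule_queue(&dev->tx_queue);
}

int main(void)
{
        struct net_device dev = { .xoff = false };

        dev.tx_queue.dev = &dev;
        netif_schedule(&dev);                   /* old-style call site */
        netif_schedule_queue(&dev.tx_queue);    /* new-style call site */
        return 0;
}

Existing drivers can keep calling netif_schedule(dev) unchanged while core code migrates to the queue-based calls, which is the transition the commit message describes.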
@@ -952,12 +952,19 @@ DECLARE_PER_CPU(struct softnet_data,softnet_data);
 
 #define HAVE_NETIF_QUEUE
 
-extern void __netif_schedule(struct net_device *dev);
+extern void __netif_schedule(struct netdev_queue *txq);
+
+static inline void netif_schedule_queue(struct netdev_queue *txq)
+{
+	struct net_device *dev = txq->dev;
+
+	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
+		__netif_schedule(txq);
+}
 
 static inline void netif_schedule(struct net_device *dev)
 {
-	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+	netif_schedule_queue(&dev->tx_queue);
 }
 
 /**
@@ -987,7 +994,7 @@ static inline void netif_wake_queue(struct net_device *dev)
 	}
 #endif
 	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
-		__netif_schedule(dev);
+		__netif_schedule(&dev->tx_queue);
 }
 
 /**
@@ -1103,7 +1110,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 #endif
 	if (test_and_clear_bit(__LINK_STATE_XOFF,
 			       &dev->egress_subqueue[queue_index].state))
-		__netif_schedule(dev);
+		__netif_schedule(&dev->tx_queue);
 #endif
 }
 
@@ -1320,12 +1320,13 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 	}
 }
 
-void __netif_schedule(struct net_device *dev)
+void __netif_schedule(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
+
 	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-		struct netdev_queue *txq = &dev->tx_queue;
-		unsigned long flags;
 		struct softnet_data *sd;
+		unsigned long flags;
 
 		local_irq_save(flags);
 		sd = &__get_cpu_var(softnet_data);
@@ -1932,7 +1933,7 @@ static void net_tx_action(struct softirq_action *h)
 				qdisc_run(dev);
 				spin_unlock(&txq->lock);
 			} else {
-				netif_schedule(dev);
+				netif_schedule_queue(txq);
 			}
 		}
 	}
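As a side note on the two net/core/dev.c hunks above, the test_and_set_bit(__LINK_STATE_SCHED) guard in __netif_schedule() means a device is put on the per-CPU output list at most once until net_tx_action() clears the bit and drains the list. The following is a minimal single-threaded model of that "schedule at most once, drain later" pattern; the list handling, the plain bool flag, and the printf are illustrative stand-ins, not the kernel's per-CPU softirq machinery.

/*
 * Toy model of the __LINK_STATE_SCHED handshake between
 * __netif_schedule() and net_tx_action(); not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct txq {
        bool sched;         /* models the __LINK_STATE_SCHED bit (kept in dev->state in this tree) */
        struct txq *next;   /* models the output_queue chaining */
};

static struct txq *output_queue;    /* per-CPU list in the real code */

static void sketch_netif_schedule(struct txq *q)
{
        if (!q->sched) {            /* test_and_set_bit(), minus atomicity */
                q->sched = true;
                q->next = output_queue;
                output_queue = q;   /* the real code then raises NET_TX_SOFTIRQ */
        }
}

static void sketch_net_tx_action(void)
{
        while (output_queue) {
                struct txq *q = output_queue;

                output_queue = q->next;
                q->sched = false;   /* clear the bit, allow rescheduling */
                printf("run qdisc for queue %p\n", (void *)q);
        }
}

int main(void)
{
        struct txq q = { false, NULL };

        sketch_netif_schedule(&q);
        sketch_netif_schedule(&q);  /* duplicate kick is a no-op */
        sketch_net_tx_action();     /* drains exactly one entry */
        return 0;
}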
@@ -885,10 +885,10 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 	spin_unlock_bh(&txq->lock);
 
 	/* we just requeued the all the frames that were in the removed
-	 * queue, and since we might miss a softirq we do netif_schedule.
+	 * queue, and since we might miss a softirq we do netif_schedule_queue.
 	 * ieee80211_wake_queue is not used here as this queue is not
 	 * necessarily stopped */
-	netif_schedule(local->mdev);
+	netif_schedule_queue(txq);
 	spin_lock_bh(&sta->lock);
 	*state = HT_AGG_STATE_IDLE;
 	sta->ampdu_mlme.addba_req_num[tid] = 0;
@@ -282,11 +282,11 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
-	struct net_device *dev = qdisc_dev(wd->qdisc);
+	struct netdev_queue *txq = wd->qdisc->dev_queue;
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
-	netif_schedule(dev);
+	netif_schedule_queue(txq);
 
 	return HRTIMER_NORESTART;
 }
@@ -650,7 +650,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule(qdisc_dev(sch));
+	netif_schedule_queue(sch->dev_queue);
 	return HRTIMER_NORESTART;
 }
 
@@ -62,7 +62,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
 	return q->q.qlen;
 }
 
-static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
+static inline int dev_requeue_skb(struct sk_buff *skb,
 				  struct netdev_queue *dev_queue,
 				  struct Qdisc *q)
 {
@@ -71,7 +71,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
 	else
 		q->ops->requeue(skb, q);
 
-	netif_schedule(dev);
+	netif_schedule_queue(dev_queue);
 	return 0;
 }
 
@@ -114,7 +114,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 * some time.
 		 */
 		__get_cpu_var(netdev_rx_stat).cpu_collision++;
-		ret = dev_requeue_skb(skb, dev, dev_queue, q);
+		ret = dev_requeue_skb(skb, dev_queue, q);
 	}
 
 	return ret;
@@ -179,7 +179,7 @@ static inline int qdisc_restart(struct net_device *dev)
 		printk(KERN_WARNING "BUG %s code %d qlen %d\n",
 		       dev->name, ret, q->q.qlen);
 
-		ret = dev_requeue_skb(skb, dev, txq, q);
+		ret = dev_requeue_skb(skb, txq, q);
 		break;
 	}
 
@@ -200,7 +200,7 @@ void __qdisc_run(struct net_device *dev)
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule(dev);
+			netif_schedule_queue(&dev->tx_queue);
 			break;
 		}
 	}