pkt_sched: Make qdisc_run take a netdev_queue.
This allows us to use this calling convention all the way down into qdisc_restart().

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 86d804e10a
commit eb6aafe3f8

3 changed files with 21 additions and 17 deletions
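The conversion is mechanical, but the shape of the new calling convention is easier to see outside the diff. The snippet below is a minimal, self-contained userspace sketch, not kernel code: the struct definitions are simplified stand-ins, and a plain flag replaces the real __LINK_STATE_QDISC_RUNNING bit and the txq->lock handling. What it illustrates is the convention this patch introduces: callers hand qdisc_run() a netdev_queue, and the device is recovered from the queue's dev back-pointer only where per-device state still has to be consulted.

/* Sketch only: simplified stand-ins for struct net_device / struct netdev_queue. */
#include <stdio.h>

struct net_device;

struct netdev_queue {
	struct net_device *dev;		/* back-pointer to the owning device */
};

struct net_device {
	const char *name;
	int tx_stopped;			/* stands in for netif_queue_stopped() */
	int qdisc_running;		/* stands in for __LINK_STATE_QDISC_RUNNING */
	struct netdev_queue tx_queue;	/* still a single TX queue at this point */
};

static void __qdisc_run(struct netdev_queue *txq)
{
	/* Everything below qdisc_run() now works on the queue object. */
	printf("running qdisc for %s\n", txq->dev->name);
}

/* New convention: take the queue, derive the device only where needed. */
static void qdisc_run(struct netdev_queue *txq)
{
	struct net_device *dev = txq->dev;

	if (!dev->tx_stopped && !dev->qdisc_running) {
		dev->qdisc_running = 1;
		__qdisc_run(txq);	/* the old code passed dev here */
		dev->qdisc_running = 0;
	}
}

int main(void)
{
	struct net_device dev = { .name = "eth0" };

	dev.tx_queue.dev = &dev;
	qdisc_run(&dev.tx_queue);	/* callers now pass the queue, not the device */
	return 0;
}

In the real qdisc_run() the flag is the __LINK_STATE_QDISC_RUNNING bit, tested and set atomically on dev->state, as in the include/net/pkt_sched.h hunk below, so only one CPU services the qdisc at a time.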
include/net/pkt_sched.h

@@ -84,13 +84,15 @@ extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 						struct nlattr *tab);
 extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
 
-extern void __qdisc_run(struct net_device *dev);
+extern void __qdisc_run(struct netdev_queue *txq);
 
-static inline void qdisc_run(struct net_device *dev)
+static inline void qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
+
 	if (!netif_queue_stopped(dev) &&
 	    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
-		__qdisc_run(dev);
+		__qdisc_run(txq);
 }
 
 extern int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
net/core/dev.c

@@ -1734,7 +1734,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 			/* reset queue_mapping to zero */
 			skb_set_queue_mapping(skb, 0);
 			rc = q->enqueue(skb, q);
-			qdisc_run(dev);
+			qdisc_run(txq);
 			spin_unlock(&txq->lock);
 
 			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
@@ -1930,7 +1930,7 @@ static void net_tx_action(struct softirq_action *h)
 			clear_bit(__LINK_STATE_SCHED, &dev->state);
 
 			if (spin_trylock(&txq->lock)) {
-				qdisc_run(dev);
+				qdisc_run(txq);
 				spin_unlock(&txq->lock);
 			} else {
 				netif_schedule_queue(txq);
net/sched/sch_generic.c

@@ -75,9 +75,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
-					      struct netdev_queue *dev_queue,
-					      struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
+					  struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
@@ -90,10 +89,10 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
 }
 
 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct net_device *dev,
 					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
+	struct net_device *dev = dev_queue->dev;
 	int ret;
 
 	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
@@ -139,21 +138,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  *				>0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct net_device *dev)
+static inline int qdisc_restart(struct netdev_queue *txq)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
 	struct Qdisc *q = txq->qdisc;
-	struct sk_buff *skb;
 	int ret = NETDEV_TX_BUSY;
+	struct net_device *dev;
+	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dev_dequeue_skb(dev, txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
 		return 0;
 
 
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
+	dev = txq->dev;
+
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
@@ -170,7 +171,7 @@ static inline int qdisc_restart(struct net_device *dev)
 
 	case NETDEV_TX_LOCKED:
 		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, dev, txq, q);
+		ret = handle_dev_cpu_collision(skb, txq, q);
 		break;
 
 	default:
@@ -186,11 +187,12 @@ static inline int qdisc_restart(struct net_device *dev)
 	return ret;
 }
 
-void __qdisc_run(struct net_device *dev)
+void __qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;
 
-	while (qdisc_restart(dev)) {
+	while (qdisc_restart(txq)) {
 		if (netif_queue_stopped(dev))
 			break;
 
@@ -200,7 +202,7 @@ void __qdisc_run(struct net_device *dev)
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule_queue(&dev->tx_queue);
+			netif_schedule_queue(txq);
 			break;
 		}
 	}