#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
-#include <linux/lockdep.h>
#define TX_TIMEOUT (2*HZ)
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
-/*
- * dev_ifb's TX queue lock is usually taken after dev->rx_queue.lock,
- * reversely to e.g. qdisc_lock_tree(). It should be safe until
- * ifb doesn't take dev's TX queue lock with dev_ifb->rx_queue.lock.
- * But lockdep should know that ifb has different locks from dev.
- */
-static struct lock_class_key ifb_tx_queue_lock_key;
-static struct lock_class_key ifb_rx_queue_lock_key;
-
-static void set_tx_lockdep_key(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->lock, &ifb_tx_queue_lock_key);
-}
-
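For context on why the ifb-specific lock classes can simply be deleted: they existed only because every netdev_queue embedded its own spinlock, and an skb redirected to ifb (e.g. via act_mirred) is transmitted while the originating device's rx_queue lock may already be held, which is exactly the nesting the removed comment describes. A hedged, illustrative sketch of that nesting (call chain simplified, not part of the patch):

/*
 * Illustrative nesting only (simplified):
 *
 *   ingress on dev          ->  dev->rx_queue.lock held
 *     act_mirred redirect   ->  skb handed to ifb
 *       ifb transmit path   ->  dev_ifb's TX queue lock taken
 *
 * Once the spinlock is removed from struct netdev_queue (see the hunk
 * below), there is no per-queue lock left to annotate, so these
 * lock_class_key definitions become dead code.
 */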
static int __init ifb_init_one(int index)
{
struct net_device *dev_ifb;
if (err < 0)
goto err;
- netdev_for_each_tx_queue(dev_ifb, set_tx_lockdep_key, NULL);
- lockdep_set_class(&dev_ifb->rx_queue.lock, &ifb_rx_queue_lock_key);
-
return 0;
err:
}
struct netdev_queue {
- spinlock_t lock;
struct net_device *dev;
struct Qdisc *qdisc;
unsigned long state;
};
+static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
+{
+ return &qdisc->q.lock;
+}
+
static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc;
}

static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
struct Qdisc *root = qdisc_root(qdisc);
- return &root->dev_queue->lock;
+ return qdisc_lock(root);
}
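Every caller converted below follows the same recipe: stop locking the netdev_queue and lock the qdisc instead, via qdisc_lock() for the qdisc itself or qdisc_root_lock() when the root qdisc's lock must be held. A minimal sketch of the conversion, using a hypothetical helper name (example_reset_txq_qdisc is not part of the patch):

/* Hypothetical helper illustrating the caller-side conversion:
 * reset a TX queue's qdisc under its root qdisc lock.
 */
static void example_reset_txq_qdisc(struct netdev_queue *txq)
{
	/* old code:  spin_lock_bh(&txq->lock);               */
	/* new code:  derive the lock from the qdisc instead. */
	spinlock_t *root_lock = qdisc_root_lock(txq->qdisc);

	spin_lock_bh(root_lock);
	qdisc_reset(txq->qdisc);
	spin_unlock_bh(root_lock);
}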
static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
rxq = &dev->rx_queue;
- spin_lock(&rxq->lock);
- if ((q = rxq->qdisc) != NULL)
+ q = rxq->qdisc;
+ if (q) {
+ spin_lock(qdisc_lock(q));
result = q->enqueue(skb, q);
- spin_unlock(&rxq->lock);
+ spin_unlock(qdisc_lock(q));
+ }
return result;
}
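Two things change in the ingress path above: serialization now comes from the ingress qdisc's own q.lock rather than from rx_queue.lock, and no lock is taken at all when no ingress qdisc is attached. A condensed sketch of the resulting critical section (example_ingress_enqueue is a hypothetical helper; the TC_ACT_OK default mirrors ing_filter() of this era):

/* Hypothetical condensation of the ingress enqueue step above. */
static int example_ingress_enqueue(struct sk_buff *skb,
				   struct netdev_queue *rxq)
{
	struct Qdisc *q = rxq->qdisc;
	int result = TC_ACT_OK;

	if (q) {
		spin_lock(qdisc_lock(q));	/* the qdisc's own q.lock */
		result = q->enqueue(skb, q);
		spin_unlock(qdisc_lock(q));
	}
	return result;
}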
struct netdev_queue *queue,
void *_unused)
{
- spin_lock_init(&queue->lock);
queue->dev = dev;
}
ieee80211_requeue(local, agg_queue);
} else {
struct netdev_queue *txq;
+ spinlock_t *root_lock;
txq = netdev_get_tx_queue(local->mdev, agg_queue);
+ root_lock = qdisc_root_lock(txq->qdisc);
- spin_lock_bh(&txq->lock);
+ spin_lock_bh(root_lock);
qdisc_reset(txq->qdisc);
- spin_unlock_bh(&txq->lock);
+ spin_unlock_bh(root_lock);
}
}
{
struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
struct sk_buff_head list;
+ spinlock_t *root_lock;
struct Qdisc *qdisc;
u32 len;
skb_queue_head_init(&list);
- spin_lock(&txq->lock);
+ root_lock = qdisc_root_lock(qdisc);
+ spin_lock(root_lock);
for (len = qdisc->q.qlen; len > 0; len--) {
struct sk_buff *skb = qdisc->dequeue(qdisc);
if (skb)
__skb_queue_tail(&list, skb);
}
- spin_unlock(&txq->lock);
+ spin_unlock(root_lock);
for (len = list.qlen; len > 0; len--) {
struct sk_buff *skb = __skb_dequeue(&list);
txq = netdev_get_tx_queue(local->mdev, new_queue);
- spin_lock(&txq->lock);
qdisc = rcu_dereference(txq->qdisc);
- qdisc->enqueue(skb, qdisc);
- spin_unlock(&txq->lock);
+ root_lock = qdisc_root_lock(qdisc);
+ spin_lock(root_lock);
+ qdisc->enqueue(skb, qdisc);
+ spin_unlock(root_lock);
}
out_unlock:
}
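The requeue loop above also settles the ordering question raised by the old code: the qdisc pointer is looked up under RCU first, and only then is its root lock derived and taken. A condensed sketch of the per-skb step (example_requeue_one is a hypothetical name; the real code runs inside ieee80211_requeue() with rcu_read_lock_bh() already held):

/* Hypothetical condensation of the per-skb requeue step; the caller
 * holds rcu_read_lock_bh(), as ieee80211_requeue() does.
 */
static void example_requeue_one(struct ieee80211_local *local,
				struct sk_buff *skb, u16 new_queue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, new_queue);
	struct Qdisc *qdisc = rcu_dereference(txq->qdisc);
	spinlock_t *root_lock = qdisc_root_lock(qdisc);

	spin_lock(root_lock);
	qdisc->enqueue(skb, qdisc);
	spin_unlock(root_lock);
}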
/*
- * NOTE: Called under queue->lock with locally disabled BH.
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
*
* __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. queue->lock serializes queue accesses for
- * this queue AND txq->qdisc pointer itself.
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
*
* netif_tx_lock serializes accesses to device driver.
*
- * queue->lock and netif_tx_lock are mutually exclusive,
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
* if one is grabbed, another must be free.
*
* Note, that this procedure can be called by a watchdog timer
};
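The "mutually exclusive" rule in the comment is easiest to see as code: the qdisc lock is released before the driver is called under netif_tx_lock, and re-acquired afterwards. A hedged sketch of that hand-over (simplified from qdisc_restart() of this era; example_xmit_one is a made-up name, and HARD_TX_LOCK()/HARD_TX_UNLOCK() take and release the queue's netif_tx_lock unless the driver is LLTX):

/* Hedged sketch of the lock hand-over; entered with qdisc_lock(q) held,
 * as the comment above requires. Not the literal qdisc_restart() body.
 */
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev,
			    struct netdev_queue *txq, struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int ret;

	spin_unlock(root_lock);			/* drop the qdisc lock ... */

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	ret = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);			/* ... and take it back */

	return ret;
}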
static struct netdev_queue noop_netdev_queue = {
- .lock = __SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
.qdisc = &noop_qdisc,
};
.flags = TCQ_F_BUILTIN,
.ops = &noop_qdisc_ops,
.list = LIST_HEAD_INIT(noop_qdisc.list),
+ .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);
}
EXPORT_SYMBOL(qdisc_create_dflt);
-/* Under queue->lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
module_put(ops->owner);
dev_put(qdisc_dev(qdisc));
+ kfree_skb(qdisc->gso_skb);
+
kfree((char *) qdisc - qdisc->padded);
}
-/* Under queue->lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
struct netdev_queue *dev_queue,
void *_need_watchdog)
{
+ struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
int *need_watchdog_p = _need_watchdog;
- spin_lock_bh(&dev_queue->lock);
- rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
- if (dev_queue->qdisc != &noqueue_qdisc)
+ rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+ if (new_qdisc != &noqueue_qdisc)
*need_watchdog_p = 1;
- spin_unlock_bh(&dev_queue->lock);
}
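With the queue spinlock gone, publishing the sleeping qdisc relies on rcu_assign_pointer() alone, paired with RCU readers on the transmit path. A hedged condensation of the reader side (example_tx_path is hypothetical; it mirrors the dev_queue_xmit() pattern of this era):

/* Hypothetical condensation of the reader that the rcu_assign_pointer()
 * above pairs with; mirrors the dev_queue_xmit() pattern of this era.
 */
static void example_tx_path(struct net_device *dev, struct sk_buff *skb)
{
	struct netdev_queue *txq;
	struct Qdisc *q;

	rcu_read_lock_bh();
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	q = rcu_dereference(txq->qdisc);	/* pairs with rcu_assign_pointer() */

	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_root_lock(q);

		spin_lock(root_lock);
		q->enqueue(skb, q);
		spin_unlock(root_lock);
	}
	rcu_read_unlock_bh();
}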
void dev_activate(struct net_device *dev)
struct sk_buff *skb = NULL;
struct Qdisc *qdisc;
- spin_lock_bh(&dev_queue->lock);
-
qdisc = dev_queue->qdisc;
if (qdisc) {
+ spin_lock_bh(qdisc_lock(qdisc));
+
dev_queue->qdisc = qdisc_default;
qdisc_reset(qdisc);
- skb = qdisc->gso_skb;
- qdisc->gso_skb = NULL;
+ spin_unlock_bh(qdisc_lock(qdisc));
}
- spin_unlock_bh(&dev_queue->lock);
-
kfree_skb(skb);
}
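Note the asymmetry this leaves behind: skb is never assigned any more, so the trailing kfree_skb(skb) is a no-op (kfree_skb() tolerates NULL); the requeued GSO skb is instead released in the destroy path, via the kfree_skb(qdisc->gso_skb) hunk shown earlier.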
master->slaves = NEXT_SLAVE(q);
if (q == master->slaves) {
struct netdev_queue *txq;
+ spinlock_t *root_lock;
txq = netdev_get_tx_queue(master->dev, 0);
master->slaves = NULL;
- spin_lock_bh(&txq->lock);
+
+ root_lock = qdisc_root_lock(txq->qdisc);
+ spin_lock_bh(root_lock);
qdisc_reset(txq->qdisc);
- spin_unlock_bh(&txq->lock);
+ spin_unlock_bh(root_lock);
}
}
skb_queue_purge(&dat->q);