	return skb;
}
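/* validate_xmit_skb() becomes static: after this change its only callers are
 * in net/core/dev.c, while the qdisc layer goes through the new
 * validate_xmit_skb_list() below instead.
 */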
-struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;
	return NULL;
}
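/* New helper: walk an skb list, run each entry through validate_xmit_skb()
 * (which may drop the skb or expand a GSO skb into a list of segments) and
 * splice whatever survives back into a single ->next-chained list.
 */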
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sk_buff *next, *head = NULL, *tail;
+
+	while (skb) {
+		next = skb->next;
+		skb->next = NULL;
+		skb = validate_xmit_skb(skb, dev);
+		if (skb) {
+			struct sk_buff *end = skb;
+
+			while (end->next)
+				end = end->next;
+			if (!head)
+				head = skb;
+			else
+				tail->next = skb;
+			tail = end;
+		}
+		skb = next;
+	}
+	return head;
+}
+
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
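/* The next hunk is in __dev_xmit_skb(): the TCQ_F_CAN_BYPASS fast path no
 * longer calls validate_xmit_skb() itself; it passes validate=true so that
 * sch_direct_xmit() validates the skb after the qdisc root lock is dropped.
 */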
		qdisc_bstats_update(q, skb);
-		skb = validate_xmit_skb(skb, dev);
-		if (skb && sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
	return 0;
}
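/* try_bulk_dequeue_skb() is reworked: the byte budget is now derived from
 * BQL via qdisc_avail_bulklimit() instead of being passed in, and dequeued
 * skbs are chained behind the first one unvalidated; validating the whole
 * list is deferred to sch_direct_xmit().
 */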
-static struct sk_buff *try_bulk_dequeue_skb(struct Qdisc *q,
-					    struct sk_buff *head_skb,
-					    int bytelimit)
+static void try_bulk_dequeue_skb(struct Qdisc *q,
+				 struct sk_buff *skb,
+				 const struct netdev_queue *txq)
{
-	struct sk_buff *skb, *tail_skb = head_skb;
+	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
	while (bytelimit > 0) {
-		skb = q->dequeue(q);
-		if (!skb)
-			break;
+		struct sk_buff *nskb = q->dequeue(q);
-		bytelimit -= skb->len; /* covers GSO len */
-		skb = validate_xmit_skb(skb, qdisc_dev(q));
-		if (!skb)
+		if (!nskb)
			break;
-		while (tail_skb->next) /* GSO list goto tail */
-			tail_skb = tail_skb->next;
-
-		tail_skb->next = skb;
-		tail_skb = skb;
+		bytelimit -= nskb->len; /* covers GSO len */
+		skb->next = nskb;
+		skb = nskb;
	}
-
-	return head_skb;
+	skb->next = NULL;
}
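/* dequeue_skb() gains a "validate" out-parameter telling the caller whether
 * the returned skb (list) still needs validate_xmit_skb_list(). A requeued
 * q->gso_skb was already validated before the previous transmit attempt, so
 * it is not run through validation a second time.
 */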
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
-static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
+static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
{
	struct sk_buff *skb = q->gso_skb;
	const struct netdev_queue *txq = q->dev_queue;
+	*validate = true;
	if (unlikely(skb)) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			q->gso_skb = NULL;
			q->q.qlen--;
		} else
			skb = NULL;
+		/* skb in gso_skb were already validated */
+		*validate = false;
	} else {
		if (!(q->flags & TCQ_F_ONETXQUEUE) ||
		    !netif_xmit_frozen_or_stopped(txq)) {
-			int bytelimit = qdisc_avail_bulklimit(txq);
-
			skb = q->dequeue(q);
-			if (skb) {
-				bytelimit -= skb->len;
-				skb = validate_xmit_skb(skb, qdisc_dev(q));
-			}
			if (skb && qdisc_may_bulk(q))
-				skb = try_bulk_dequeue_skb(q, skb, bytelimit);
+				try_bulk_dequeue_skb(q, skb, txq);
		}
	}
-
	return skb;
}
*/
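/* sch_direct_xmit() grows a "validate" flag: when set, the skb list is run
 * through validate_xmit_skb_list() after the qdisc root lock is released
 * and before HARD_TX_LOCK is taken, so GSO segmentation and checksum work
 * happen outside both locks. If validation drops every skb, the driver is
 * not touched at all.
 */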
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
-		    spinlock_t *root_lock)
+		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	/* And release qdisc */
	spin_unlock(root_lock);
-	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_xmit_frozen_or_stopped(txq))
-		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
+	if (validate)
+		skb = validate_xmit_skb_list(skb, dev);
-	HARD_TX_UNLOCK(dev, txq);
+	if (skb) {
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
+		if (!netif_xmit_frozen_or_stopped(txq))
+			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+		HARD_TX_UNLOCK(dev, txq);
+	}
	spin_lock(root_lock);
	if (dev_xmit_complete(ret)) {
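/* In qdisc_restart(): the validate flag reported by dequeue_skb() is simply
 * forwarded to sch_direct_xmit(), so requeued (already validated) skbs are
 * not validated again.
 */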
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;
+	bool validate;
	/* Dequeue packet */
-	skb = dequeue_skb(q);
+	skb = dequeue_skb(q, &validate);
	if (unlikely(!skb))
		return 0;
	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);
-	return sch_direct_xmit(skb, q, dev, txq, root_lock);
+	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
void __qdisc_run(struct Qdisc *q)