net: add rb_to_skb() and other rb tree helpers
author Eric Dumazet <edumazet@google.com>
Thu, 13 Sep 2018 14:58:57 +0000 (07:58 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 Sep 2018 20:43:47 +0000 (22:43 +0200)
Generalize private netem_rb_to_skb()

TCP rtx queue will soon be converted to rb-tree,
so we will need skb_rbtree_walk() helpers.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
(cherry picked from commit 18a4c0eab2623cc95be98a1e6af1ad18e7695977)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/skbuff.h
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/sched/sch_netem.c

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 758084b434c86bb62ca7b5c6e75c04f74f6c3d6e..2837e55df03e1254e40eaf843cb51fad38482578 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3169,6 +3169,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
 
 #define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
 
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root)  rb_to_skb(rb_last(root))
+#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))
+
 #define skb_queue_walk(queue, skb) \
                for (skb = (queue)->next;                                       \
                     skb != (struct sk_buff *)(queue);                          \
@@ -3183,6 +3189,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
                for (; skb != (struct sk_buff *)(queue);                        \
                     skb = skb->next)
 
+#define skb_rbtree_walk(skb, root)                                             \
+               for (skb = skb_rb_first(root); skb != NULL;                     \
+                    skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb)                                              \
+               for (; skb != NULL;                                             \
+                    skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp)                                    \
+               for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);      \
+                    skb = tmp)
+
 #define skb_queue_walk_from_safe(queue, skb, tmp)                              \
                for (tmp = skb->next;                                           \
                     skb != (struct sk_buff *)(queue);                          \
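
The following is a minimal, illustrative sketch (not part of the patch) of how a caller
owning an rb-tree of skbs might use the new helpers; the function name, the pr_debug()
call and the drain loop are assumptions for illustration only:

#include <linux/rbtree.h>
#include <linux/skbuff.h>

/* Illustrative only: "root" stands for any rb_root whose nodes are
 * skb->rbnode, e.g. tp->out_of_order_queue in the hunks below.
 */
static void skb_rbtree_example(struct rb_root *root)
{
	struct sk_buff *skb, *tmp;

	/* Read-only in-order walk; skb_rb_first()/skb_rb_next() return
	 * NULL on an empty tree / at the end of the walk, so the loop
	 * body never sees a NULL skb.
	 */
	skb_rbtree_walk(skb, root)
		pr_debug("skb len %u\n", skb->len);

	/* Deletion-safe walk: tmp caches the successor before the body
	 * runs, so the current skb may be erased and freed.
	 */
	skb = skb_rb_first(root);
	skb_rbtree_walk_from_safe(skb, tmp) {
		rb_erase(&skb->rbnode, root);
		kfree_skb(skb);
	}
}

skb_rbtree_walk_from() behaves like skb_rbtree_walk(), but starts from an
already-located skb instead of skb_rb_first(root).
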
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index fbbeda64777406d16beb0c2905bedb633c28d33a..0567edb76522cbeb84943cada6335556719c8fc0 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct rb_node *p;
-       struct sk_buff *skb;
        struct dst_entry *dst;
+       struct sk_buff *skb;
 
        if (!tp->syn_fastopen)
                return;
 
        if (!tp->data_segs_in) {
-               p = rb_first(&tp->out_of_order_queue);
-               if (p && !rb_next(p)) {
-                       skb = rb_entry(p, struct sk_buff, rbnode);
+               skb = skb_rb_first(&tp->out_of_order_queue);
+               if (skb && !skb_rb_next(skb)) {
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bdabd748f4bcf24a4c980f4351044c69691a5304..991f382afc1b077f6005cffa7a80be98c52c9eb5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
        p = rb_first(&tp->out_of_order_queue);
        while (p) {
-               skb = rb_entry(p, struct sk_buff, rbnode);
+               skb = rb_to_skb(p);
                if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
                        break;
 
@@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
 static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct rb_node **p, *q, *parent;
+       struct rb_node **p, *parent;
        struct sk_buff *skb1;
        u32 seq, end_seq;
        bool fragstolen;
@@ -4503,7 +4503,7 @@ coalesce_done:
        parent = NULL;
        while (*p) {
                parent = *p;
-               skb1 = rb_entry(parent, struct sk_buff, rbnode);
+               skb1 = rb_to_skb(parent);
                if (before(seq, TCP_SKB_CB(skb1)->seq)) {
                        p = &parent->rb_left;
                        continue;
@@ -4548,9 +4548,7 @@ insert:
 
 merge_right:
        /* Remove other segments covered by skb. */
-       while ((q = rb_next(&skb->rbnode)) != NULL) {
-               skb1 = rb_entry(q, struct sk_buff, rbnode);
-
+       while ((skb1 = skb_rb_next(skb)) != NULL) {
                if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
                        break;
                if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4565,7 +4563,7 @@ merge_right:
                tcp_drop(sk, skb1);
        }
        /* If there is no skb after us, we are the last_skb ! */
-       if (!q)
+       if (!skb1)
                tp->ooo_last_skb = skb;
 
 add_sack:
@@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
        if (list)
                return !skb_queue_is_last(list, skb) ? skb->next : NULL;
 
-       return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+       return skb_rb_next(skb);
 }
 
 static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
 
        while (*p) {
                parent = *p;
-               skb1 = rb_entry(parent, struct sk_buff, rbnode);
+               skb1 = rb_to_skb(parent);
                if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
                        p = &parent->rb_left;
                else
@@ -4898,19 +4896,12 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        u32 range_truesize, sum_tiny = 0;
        struct sk_buff *skb, *head;
-       struct rb_node *p;
        u32 start, end;
 
-       p = rb_first(&tp->out_of_order_queue);
-       skb = rb_entry_safe(p, struct sk_buff, rbnode);
+       skb = skb_rb_first(&tp->out_of_order_queue);
 new_range:
        if (!skb) {
-               p = rb_last(&tp->out_of_order_queue);
-               /* Note: This is possible p is NULL here. We do not
-                * use rb_entry_safe(), as ooo_last_skb is valid only
-                * if rbtree is not empty.
-                */
-               tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+               tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
                return;
        }
        start = TCP_SKB_CB(skb)->seq;
@@ -4918,7 +4909,7 @@ new_range:
        range_truesize = skb->truesize;
 
        for (head = skb;;) {
-               skb = tcp_skb_next(skb, NULL);
+               skb = skb_rb_next(skb);
 
                /* Range is terminated when we see a gap or when
                 * we are at the queue end.
@@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
                prev = rb_prev(node);
                rb_erase(node, &tp->out_of_order_queue);
                goal -= rb_to_skb(node)->truesize;
-               tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+               tcp_drop(sk, rb_to_skb(node));
                if (!prev || goal <= 0) {
                        sk_mem_reclaim(sk);
                        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
                }
                node = prev;
        } while (node);
-       tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+       tp->ooo_last_skb = rb_to_skb(prev);
 
        /* Reset SACK state.  A conforming SACK implementation will
         * do the same at a timeout based retransmit.  When a connection
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 8c8df75dbeadd2f555d2e4d9e9c2b2a03ee0c89d..2a2ab6bfe5d85d93380165211affcf1acbeaacc8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -149,12 +149,6 @@ struct netem_skb_cb {
        ktime_t         tstamp_save;
 };
 
-
-static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
-{
-       return rb_entry(rb, struct sk_buff, rbnode);
-}
-
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
 {
        /* we assume we can use skb next/prev/tstamp as storage for rb_node */
@@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
        struct rb_node *p;
 
        while ((p = rb_first(&q->t_root))) {
-               struct sk_buff *skb = netem_rb_to_skb(p);
+               struct sk_buff *skb = rb_to_skb(p);
 
                rb_erase(p, &q->t_root);
                rtnl_kfree_skbs(skb, skb);
@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
                struct sk_buff *skb;
 
                parent = *p;
-               skb = netem_rb_to_skb(parent);
+               skb = rb_to_skb(parent);
                if (tnext >= netem_skb_cb(skb)->time_to_send)
                        p = &parent->rb_right;
                else
@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff *t_skb;
                                struct netem_skb_cb *t_last;
 
-                               t_skb = netem_rb_to_skb(rb_last(&q->t_root));
+                               t_skb = skb_rb_last(&q->t_root);
                                t_last = netem_skb_cb(t_skb);
                                if (!last ||
                                    t_last->time_to_send > last->time_to_send) {
@@ -618,7 +612,7 @@ deliver:
        if (p) {
                psched_time_t time_to_send;
 
-               skb = netem_rb_to_skb(p);
+               skb = rb_to_skb(p);
 
                /* if more time remaining? */
                time_to_send = netem_skb_cb(skb)->time_to_send;