net: convert sk_buff.users from atomic_t to refcount_t
author		Reshetova, Elena <elena.reshetova@intel.com>
		Fri, 30 Jun 2017 10:07:58 +0000 (13:07 +0300)
committer	David S. Miller <davem@davemloft.net>
		Sat, 1 Jul 2017 14:39:07 +0000 (07:39 -0700)
The refcount_t type and its corresponding API should be used
instead of atomic_t when the variable is used as a reference
counter. This helps avoid accidental reference counter overflows
that might lead to use-after-free situations.
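
The conversion is mechanical: atomic_set() becomes refcount_set(),
atomic_inc() becomes refcount_inc(), atomic_read() becomes
refcount_read(), and atomic_dec_and_test() becomes
refcount_dec_and_test() (plus refcount_add() and
refcount_sub_and_test() for the batched counts in pktgen). A minimal
sketch of the pattern on a hypothetical refcounted object — struct foo
and its helpers below are illustrative only, not part of this patch:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct foo {
		refcount_t users;	/* was: atomic_t users; */
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			refcount_set(&f->users, 1);	/* was: atomic_set() */
		return f;
	}

	static struct foo *foo_get(struct foo *f)
	{
		refcount_inc(&f->users);	/* saturates instead of wrapping */
		return f;
	}

	static void foo_put(struct foo *f)
	{
		if (refcount_dec_and_test(&f->users))	/* was: atomic_dec_and_test() */
			kfree(f);
	}

Unlike atomic_inc(), refcount_inc() saturates at its maximum value and
warns rather than wrapping back through zero, so a reference leak
cannot silently turn into a use-after-free.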

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
21 files changed:
drivers/infiniband/hw/nes/nes_cm.c
drivers/isdn/mISDN/socket.c
drivers/net/rionet.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core_main.c
include/linux/skbuff.h
net/core/datagram.c
net/core/dev.c
net/core/netpoll.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dccp/ipv6.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/key/af_key.c
net/netlink/af_netlink.c
net/rxrpc/skbuff.c
net/sctp/outqueue.c
net/sctp/socket.c

diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 30b256a2c54ec42dd97b29ff0f0cb15be6d44510..de4025deaa4ad384db5b86f15165aa991dcb45b4 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
        if (type == NES_TIMER_TYPE_SEND) {
                new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
-               atomic_inc(&new_send->skb->users);
+               refcount_inc(&new_send->skb->users);
                spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
                cm_node->send_entry = new_send;
                add_ref_cm_node(cm_node);
@@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
                                                  flags);
                                break;
                        }
-                       atomic_inc(&send_entry->skb->users);
+                       refcount_inc(&send_entry->skb->users);
                        cm_packets_retrans++;
                        nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
                                  "for node %p, jiffies = %lu, time to send = "
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 99e5f9751e8b1746835b28c4bd4e2a5d1b53fd14..c5603d1a07d6e86ce7f6913168daacb56b134ea3 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -155,7 +155,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        copied = skb->len + MISDN_HEADER_LEN;
        if (len < copied) {
                if (flags & MSG_PEEK)
-                       atomic_dec(&skb->users);
+                       refcount_dec(&skb->users);
                else
                        skb_queue_head(&sk->sk_receive_queue, skb);
                return -ENOSPC;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 300bb1479b3a45bc919d053a799ea058164a0e2b..e9f101c9bae2ce1d9bde5dbe0d473119ead760e6 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -201,7 +201,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                rionet_queue_tx_msg(skb, ndev,
                                        nets[rnet->mport->id].active[i]);
                                if (count)
-                                       atomic_inc(&skb->users);
+                                       refcount_inc(&skb->users);
                                count++;
                        }
        } else if (RIONET_MAC_MATCH(eth->h_dest)) {
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 99121352c57beec4bceb976f7d06e4e262dd4585..e8782a8619f79276b6876870af80494b502ce142 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -483,7 +483,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
                        spin_unlock_irqrestore(&ch->collect_lock, saveflags);
                        return -EBUSY;
                } else {
-                       atomic_inc(&skb->users);
+                       refcount_inc(&skb->users);
                        header.length = l;
                        header.type = be16_to_cpu(skb->protocol);
                        header.unused = 0;
@@ -500,7 +500,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
         * Protect skb against beeing free'd by upper
         * layers.
         */
-       atomic_inc(&skb->users);
+       refcount_inc(&skb->users);
        ch->prof.txlen += skb->len;
        header.length = skb->len + LL_HEADER_LENGTH;
        header.type = be16_to_cpu(skb->protocol);
@@ -517,14 +517,14 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
        if (hi) {
                nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!nskb) {
-                       atomic_dec(&skb->users);
+                       refcount_dec(&skb->users);
                        skb_pull(skb, LL_HEADER_LENGTH + 2);
                        ctcm_clear_busy(ch->netdev);
                        return -ENOMEM;
                } else {
                        skb_put_data(nskb, skb->data, skb->len);
-                       atomic_inc(&nskb->users);
-                       atomic_dec(&skb->users);
+                       refcount_inc(&nskb->users);
+                       refcount_dec(&skb->users);
                        dev_kfree_skb_irq(skb);
                        skb = nskb;
                }
@@ -542,7 +542,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
                         * Remove our header. It gets added
                         * again on retransmit.
                         */
-                       atomic_dec(&skb->users);
+                       refcount_dec(&skb->users);
                        skb_pull(skb, LL_HEADER_LENGTH + 2);
                        ctcm_clear_busy(ch->netdev);
                        return -ENOMEM;
@@ -553,7 +553,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
                ch->ccw[1].count = skb->len;
                skb_copy_from_linear_data(skb,
                                skb_put(ch->trans_skb, skb->len), skb->len);
-               atomic_dec(&skb->users);
+               refcount_dec(&skb->users);
                dev_kfree_skb_irq(skb);
                ccw_idx = 0;
        } else {
@@ -679,7 +679,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
 
        if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
                spin_lock_irqsave(&ch->collect_lock, saveflags);
-               atomic_inc(&skb->users);
+               refcount_inc(&skb->users);
                p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
 
                if (!p_header) {
@@ -716,7 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
         * Protect skb against beeing free'd by upper
         * layers.
         */
-       atomic_inc(&skb->users);
+       refcount_inc(&skb->users);
 
        /*
         * IDAL support in CTCM is broken, so we have to
@@ -729,8 +729,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
                        goto nomem_exit;
                } else {
                        skb_put_data(nskb, skb->data, skb->len);
-                       atomic_inc(&nskb->users);
-                       atomic_dec(&skb->users);
+                       refcount_inc(&nskb->users);
+                       refcount_dec(&skb->users);
                        dev_kfree_skb_irq(skb);
                        skb = nskb;
                }
@@ -810,7 +810,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
                ch->trans_skb->len = 0;
                ch->ccw[1].count = skb->len;
                skb_put_data(ch->trans_skb, skb->data, skb->len);
-               atomic_dec(&skb->users);
+               refcount_dec(&skb->users);
                dev_kfree_skb_irq(skb);
                ccw_idx = 0;
                CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
@@ -855,7 +855,7 @@ nomem_exit:
                        "%s(%s): MEMORY allocation ERROR\n",
                        CTCM_FUNTAIL, ch->id);
        rc = -ENOMEM;
-       atomic_dec(&skb->users);
+       refcount_dec(&skb->users);
        dev_kfree_skb_any(skb);
        fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
 done:
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 7db427c0a6a46bf8190ba3b9ccb0453422a1d337..1579695f4e640428296bfb88898b20553c56f2bd 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -743,7 +743,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
        conn->prof.tx_pending--;
        if (single_flag) {
                if ((skb = skb_dequeue(&conn->commit_queue))) {
-                       atomic_dec(&skb->users);
+                       refcount_dec(&skb->users);
                        if (privptr) {
                                privptr->stats.tx_packets++;
                                privptr->stats.tx_bytes +=
@@ -766,7 +766,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
                txbytes += skb->len;
                txpackets++;
                stat_maxcq++;
-               atomic_dec(&skb->users);
+               refcount_dec(&skb->users);
                dev_kfree_skb_any(skb);
        }
        if (conn->collect_len > conn->prof.maxmulti)
@@ -958,7 +958,7 @@ static void netiucv_purge_skb_queue(struct sk_buff_head *q)
        struct sk_buff *skb;
 
        while ((skb = skb_dequeue(q))) {
-               atomic_dec(&skb->users);
+               refcount_dec(&skb->users);
                dev_kfree_skb_any(skb);
        }
 }
@@ -1176,7 +1176,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
                        IUCV_DBF_TEXT(data, 2,
                                      "EBUSY from netiucv_transmit_skb\n");
                } else {
-                       atomic_inc(&skb->users);
+                       refcount_inc(&skb->users);
                        skb_queue_tail(&conn->collect_queue, skb);
                        conn->collect_len += l;
                        rc = 0;
@@ -1245,7 +1245,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
                } else {
                        if (copied)
                                dev_kfree_skb(skb);
-                       atomic_inc(&nskb->users);
+                       refcount_inc(&nskb->users);
                        skb_queue_tail(&conn->commit_queue, nskb);
                }
        }
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3b657d5b7e491bc604366b210cd9f180ad4794da..aec06e10b96911b88402edcb87c7a867cbc3bc3a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1242,7 +1242,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
                                iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
                        }
                }
-               atomic_dec(&skb->users);
+               refcount_dec(&skb->users);
                dev_kfree_skb_any(skb);
                skb = skb_dequeue(&buf->skb_list);
        }
@@ -3975,7 +3975,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
        int flush_cnt = 0, hdr_len, large_send = 0;
 
        buffer = buf->buffer;
-       atomic_inc(&skb->users);
+       refcount_inc(&skb->users);
        skb_queue_tail(&buf->skb_list, skb);
 
        /*check first on TSO ....*/
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 005793e01bd28982fa77ebe3193688c00d3d8cfe..90cbd86152dabb2a194bfe5601f16619f2f916d0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -761,7 +761,7 @@ struct sk_buff {
        unsigned char           *head,
                                *data;
        unsigned int            truesize;
-       atomic_t                users;
+       refcount_t              users;
 };
 
 #ifdef __KERNEL__
@@ -872,9 +872,9 @@ static inline bool skb_unref(struct sk_buff *skb)
 {
        if (unlikely(!skb))
                return false;
-       if (likely(atomic_read(&skb->users) == 1))
+       if (likely(refcount_read(&skb->users) == 1))
                smp_rmb();
-       else if (likely(!atomic_dec_and_test(&skb->users)))
+       else if (likely(!refcount_dec_and_test(&skb->users)))
                return false;
 
        return true;
@@ -1283,7 +1283,7 @@ static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
  */
 static inline struct sk_buff *skb_get(struct sk_buff *skb)
 {
-       atomic_inc(&skb->users);
+       refcount_inc(&skb->users);
        return skb;
 }
 
@@ -1384,7 +1384,7 @@ static inline void __skb_header_release(struct sk_buff *skb)
  */
 static inline int skb_shared(const struct sk_buff *skb)
 {
-       return atomic_read(&skb->users) != 1;
+       return refcount_read(&skb->users) != 1;
 }
 
 /**
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e5311a7c70daf1d6c60d67fc6cf383e85cacc82e..95d43543ac9103f96ac13ac561f053bfd04643f0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -188,7 +188,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
                                }
                        }
                        *peeked = 1;
-                       atomic_inc(&skb->users);
+                       refcount_inc(&skb->users);
                } else {
                        __skb_unlink(skb, queue);
                        if (destructor)
@@ -358,7 +358,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
                spin_lock_bh(&sk_queue->lock);
                if (skb == skb_peek(sk_queue)) {
                        __skb_unlink(skb, sk_queue);
-                       atomic_dec(&skb->users);
+                       refcount_dec(&skb->users);
                        if (destructor)
                                destructor(sk, skb);
                        err = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 88927f1a3e4f838bea54d1dc8cf6ee06f79477c8..b9994898d11ba7b6dfd9b94ca663a74192ccbd47 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1862,7 +1862,7 @@ static inline int deliver_skb(struct sk_buff *skb,
 {
        if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
                return -ENOMEM;
-       atomic_inc(&skb->users);
+       refcount_inc(&skb->users);
        return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 }
 
@@ -2484,10 +2484,10 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
        if (unlikely(!skb))
                return;
 
-       if (likely(atomic_read(&skb->users) == 1)) {
+       if (likely(refcount_read(&skb->users) == 1)) {
                smp_rmb();
-               atomic_set(&skb->users, 0);
-       } else if (likely(!atomic_dec_and_test(&skb->users))) {
+               refcount_set(&skb->users, 0);
+       } else if (likely(!refcount_dec_and_test(&skb->users))) {
                return;
        }
        get_kfree_skb_cb(skb)->reason = reason;
@@ -3955,7 +3955,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
                        clist = clist->next;
 
-                       WARN_ON(atomic_read(&skb->users));
+                       WARN_ON(refcount_read(&skb->users));
                        if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
                                trace_consume_skb(skb);
                        else
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 37c1e34ddd8551d5b91b54d1f8161b6520b2623e..a835155c85f9ec7c01b0bd8efc879ade3d733d7b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -277,7 +277,7 @@ static void zap_completion_queue(void)
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (!skb_irq_freeable(skb)) {
-                               atomic_inc(&skb->users);
+                               refcount_inc(&skb->users);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
@@ -309,7 +309,7 @@ repeat:
                return NULL;
        }
 
-       atomic_set(&skb->users, 1);
+       refcount_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
 }
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2dd42c5b0366f2c0206402d58e95d79b9ff50469..6e1e10ff433a5f4097d1d4b33848ab13d4e005c6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
 {
        ktime_t idle_start = ktime_get();
 
-       while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+       while (refcount_read(&(pkt_dev->skb->users)) != 1) {
                if (signal_pending(current))
                        break;
 
@@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
        if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
                skb = pkt_dev->skb;
                skb->protocol = eth_type_trans(skb, skb->dev);
-               atomic_add(burst, &skb->users);
+               refcount_add(burst, &skb->users);
                local_bh_disable();
                do {
                        ret = netif_receive_skb(skb);
@@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                                pkt_dev->errors++;
                        pkt_dev->sofar++;
                        pkt_dev->seq_num++;
-                       if (atomic_read(&skb->users) != burst) {
+                       if (refcount_read(&skb->users) != burst) {
                                /* skb was queued by rps/rfs or taps,
                                 * so cannot reuse this skb
                                 */
-                               atomic_sub(burst - 1, &skb->users);
+                               WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
                                /* get out of the loop and wait
                                 * until skb is consumed
                                 */
@@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                goto out; /* Skips xmit_mode M_START_XMIT */
        } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
                local_bh_disable();
-               atomic_inc(&pkt_dev->skb->users);
+               refcount_inc(&pkt_dev->skb->users);
 
                ret = dev_queue_xmit(pkt_dev->skb);
                switch (ret) {
@@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                pkt_dev->last_ok = 0;
                goto unlock;
        }
-       atomic_add(burst, &pkt_dev->skb->users);
+       refcount_add(burst, &pkt_dev->skb->users);
 
 xmit_more:
        ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@@ -3513,11 +3513,11 @@ xmit_more:
                /* fallthru */
        case NETDEV_TX_BUSY:
                /* Retry it next time */
-               atomic_dec(&(pkt_dev->skb->users));
+               refcount_dec(&(pkt_dev->skb->users));
                pkt_dev->last_ok = 0;
        }
        if (unlikely(burst))
-               atomic_sub(burst, &pkt_dev->skb->users);
+               WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
 unlock:
        HARD_TX_UNLOCK(odev, txq);
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ed51de525a8869a16f9a5aa01c60bb130dd6e1c9..d1ba90980be1325e86836795a354214cb96a4079 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -649,7 +649,7 @@ int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int g
 
        NETLINK_CB(skb).dst_group = group;
        if (echo)
-               atomic_inc(&skb->users);
+               refcount_inc(&skb->users);
        netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
        if (echo)
                err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f75897a33fa4a181e145a2f6473375eaaeceb9bc..45dc6620dd748afcc1cd229e9631dd69dd0efa83 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -176,7 +176,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->head = NULL;
        skb->truesize = sizeof(struct sk_buff);
-       atomic_set(&skb->users, 1);
+       refcount_set(&skb->users, 1);
 
        skb->mac_header = (typeof(skb->mac_header))~0U;
 out:
@@ -247,7 +247,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
-       atomic_set(&skb->users, 1);
+       refcount_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
@@ -314,7 +314,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = SKB_TRUESIZE(size);
-       atomic_set(&skb->users, 1);
+       refcount_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
@@ -915,7 +915,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        C(head_frag);
        C(data);
        C(truesize);
-       atomic_set(&n->users, 1);
+       refcount_set(&n->users, 1);
 
        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4fccc0c37fbdb4406d1370802d57a8f0aa9d376f..c376af5bfdfb34774d82de6858ec0e4b6e3380bb 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -353,7 +353,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-               atomic_inc(&skb->users);
+               refcount_inc(&skb->users);
                ireq->pktopts = skb;
        }
        ireq->ir_iif = sk->sk_bound_dev_if;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 2f7e99af67dbfd2324d39086881b9475045d7e1f..7b75b062073087b7b715629de839dcc107144402 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -194,7 +194,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
-               atomic_inc(&skb->users);
+               refcount_inc(&skb->users);
                ireq->pktopts = skb;
        }
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index f85cbfc183d61e4fa9f0cfebbf16cac6675e3fe3..f1a4881d9835abe8f41d17740bb3bc53e3ac7100 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -734,7 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
             np->rxopt.bits.rxinfo ||
             np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
             np->rxopt.bits.rxohlim || np->repflow)) {
-               atomic_inc(&skb->users);
+               refcount_inc(&skb->users);
                ireq->pktopts = skb;
        }
 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 376fdcf7a6b9ba598c0f14e20647f54902380861..287964a570e95d509726424807832e43113ebdd3 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -203,11 +203,11 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
 
        sock_hold(sk);
        if (*skb2 == NULL) {
-               if (atomic_read(&skb->users) != 1) {
+               if (refcount_read(&skb->users) != 1) {
                        *skb2 = skb_clone(skb, allocation);
                } else {
                        *skb2 = skb;
-                       atomic_inc(&skb->users);
+                       refcount_inc(&skb->users);
                }
        }
        if (*skb2 != NULL) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a88745e4b7df79264a80b3ee4150298bed4aab9f..05030ad1a36c64f6a19892eb9d0438be006c2099 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1848,7 +1848,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        }
 
        if (dst_group) {
-               atomic_inc(&skb->users);
+               refcount_inc(&skb->users);
                netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
        }
        err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
@@ -2226,7 +2226,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        struct netlink_sock *nlk;
        int ret;
 
-       atomic_inc(&skb->users);
+       refcount_inc(&skb->users);
 
        sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
        if (sk == NULL) {
@@ -2431,7 +2431,7 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
                int exclude_portid = 0;
 
                if (report) {
-                       atomic_inc(&skb->users);
+                       refcount_inc(&skb->users);
                        exclude_portid = portid;
                }
 
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c
index 67b02c45271ba80be022c77c9c84abe4de2b1c47..b8985d01876a27168fbdc411208aa2984535557b 100644
--- a/net/rxrpc/skbuff.c
+++ b/net/rxrpc/skbuff.c
@@ -27,7 +27,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(select_skb_count(op));
-       trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+       trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
 }
 
 /*
@@ -38,7 +38,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
        const void *here = __builtin_return_address(0);
        if (skb) {
                int n = atomic_read(select_skb_count(op));
-               trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
        }
 }
 
@@ -49,7 +49,7 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
 {
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(select_skb_count(op));
-       trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+       trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
        skb_get(skb);
 }
 
@@ -63,7 +63,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
                int n;
                CHECK_SLAB_OKAY(&skb->users);
                n = atomic_dec_return(select_skb_count(op));
-               trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
                kfree_skb(skb);
        }
 }
@@ -78,7 +78,7 @@ void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
                int n;
                CHECK_SLAB_OKAY(&skb->users);
                n = atomic_dec_return(select_skb_count(op));
-               trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+               trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
                kfree_skb(skb);
        }
 }
@@ -93,7 +93,7 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
        while ((skb = skb_dequeue((list))) != NULL) {
                int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
                trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
-                               atomic_read(&skb->users), n, here);
+                               refcount_read(&skb->users), n, here);
                kfree_skb(skb);
        }
 }
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 20299df163b98ededf0d8f87ccdeb8d08ca648f0..e8762702a3138cae4fd7192ecc490407b9b19a06 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1102,7 +1102,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                                 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
                                 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
                                 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
-                                atomic_read(&chunk->skb->users) : -1);
+                                refcount_read(&chunk->skb->users) : -1);
 
                        /* Add the chunk to the packet.  */
                        status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 7b6e20eb9451a070340ae461b6dc5162588da408..b497ee8ae2790f437083cc284024fa6ff63b8a7b 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7563,7 +7563,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                if (flags & MSG_PEEK) {
                        skb = skb_peek(&sk->sk_receive_queue);
                        if (skb)
-                               atomic_inc(&skb->users);
+                               refcount_inc(&skb->users);
                } else {
                        skb = __skb_dequeue(&sk->sk_receive_queue);
                }