if (type == NES_TIMER_TYPE_SEND) {
new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
- atomic_inc(&new_send->skb->users);
+ refcount_inc(&new_send->skb->users);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
cm_node->send_entry = new_send;
add_ref_cm_node(cm_node);
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
break;
}
- atomic_inc(&send_entry->skb->users);
+ refcount_inc(&send_entry->skb->users);
cm_packets_retrans++;
nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
"for node %p, jiffies = %lu, time to send = "
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;
rionet_queue_tx_msg(skb, ndev,
nets[rnet->mport->id].active[i]);
if (count)
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
count++;
}
} else if (RIONET_MAC_MATCH(eth->h_dest)) {
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
return -EBUSY;
} else {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
header.length = l;
header.type = be16_to_cpu(skb->protocol);
header.unused = 0;
* Protect skb against being free'd by upper
* layers.
*/
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
ch->prof.txlen += skb->len;
header.length = skb->len + LL_HEADER_LENGTH;
header.type = be16_to_cpu(skb->protocol);
if (hi) {
nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
} else {
skb_put_data(nskb, skb->data, skb->len);
- atomic_inc(&nskb->users);
- atomic_dec(&skb->users);
+ refcount_inc(&nskb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
* Remove our header. It gets added
* again on retransmit.
*/
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
ch->ccw[1].count = skb->len;
skb_copy_from_linear_data(skb,
skb_put(ch->trans_skb, skb->len), skb->len);
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
} else {
if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
if (!p_header) {
* Protect skb against being free'd by upper
* layers.
*/
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
/*
* IDAL support in CTCM is broken, so we have to
goto nomem_exit;
} else {
skb_put_data(nskb, skb->data, skb->len);
- atomic_inc(&nskb->users);
- atomic_dec(&skb->users);
+ refcount_inc(&nskb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
ch->trans_skb->len = 0;
ch->ccw[1].count = skb->len;
skb_put_data(ch->trans_skb, skb->data, skb->len);
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
"%s(%s): MEMORY allocation ERROR\n",
CTCM_FUNTAIL, ch->id);
rc = -ENOMEM;
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
if (privptr) {
privptr->stats.tx_packets++;
privptr->stats.tx_bytes +=
txbytes += skb->len;
txpackets++;
stat_maxcq++;
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
if (conn->collect_len > conn->prof.maxmulti)
struct sk_buff *skb;
while ((skb = skb_dequeue(q))) {
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
}
IUCV_DBF_TEXT(data, 2,
"EBUSY from netiucv_transmit_skb\n");
} else {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
rc = 0;
} else {
if (copied)
dev_kfree_skb(skb);
- atomic_inc(&nskb->users);
+ refcount_inc(&nskb->users);
skb_queue_tail(&conn->commit_queue, nskb);
}
}
iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
}
}
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
}
int flush_cnt = 0, hdr_len, large_send = 0;
buffer = buf->buffer;
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
/*check first on TSO ....*/
unsigned char *head,
*data;
unsigned int truesize;
- atomic_t users;
+ refcount_t users;
};
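/* skb->users is a plain reference count; as a refcount_t it saturates
 * and warns on overflow/underflow instead of silently wrapping like a
 * raw atomic_t.
 */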
#ifdef __KERNEL__
{
if (unlikely(!skb))
return false;
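/* A count of 1 means the caller holds the only reference, so the
 * atomic decrement can be skipped on the fast path below.
 */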
- if (likely(atomic_read(&skb->users) == 1))
+ if (likely(refcount_read(&skb->users) == 1))
smp_rmb();
- else if (likely(!atomic_dec_and_test(&skb->users)))
+ else if (likely(!refcount_dec_and_test(&skb->users)))
return false;
return true;
*/
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
return skb;
}
*/
static inline int skb_shared(const struct sk_buff *skb)
{
- return atomic_read(&skb->users) != 1;
+ return refcount_read(&skb->users) != 1;
}
/**
}
}
*peeked = 1;
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
} else {
__skb_unlink(skb, queue);
if (destructor)
spin_lock_bh(&sk_queue->lock);
if (skb == skb_peek(sk_queue)) {
__skb_unlink(skb, sk_queue);
- atomic_dec(&skb->users);
+ refcount_dec(&skb->users);
if (destructor)
destructor(sk, skb);
err = 0;
{
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
return -ENOMEM;
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
if (unlikely(!skb))
return;
- if (likely(atomic_read(&skb->users) == 1)) {
+ if (likely(refcount_read(&skb->users) == 1)) {
smp_rmb();
- atomic_set(&skb->users, 0);
- } else if (likely(!atomic_dec_and_test(&skb->users))) {
+ refcount_set(&skb->users, 0);
+ } else if (likely(!refcount_dec_and_test(&skb->users))) {
return;
}
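/* No users remain; the skb goes onto the completion queue, and
 * net_tx_action() below checks that the count really reads zero.
 */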
get_kfree_skb_cb(skb)->reason = reason;
clist = clist->next;
- WARN_ON(atomic_read(&skb->users));
+ WARN_ON(refcount_read(&skb->users));
if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
trace_consume_skb(skb);
else
struct sk_buff *skb = clist;
clist = clist->next;
if (!skb_irq_freeable(skb)) {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
dev_kfree_skb_any(skb); /* put this one back */
} else {
__kfree_skb(skb);
return NULL;
}
- atomic_set(&skb->users, 1);
+ refcount_set(&skb->users, 1);
skb_reserve(skb, reserve);
return skb;
}
{
ktime_t idle_start = ktime_get();
- while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+ while (refcount_read(&(pkt_dev->skb->users)) != 1) {
if (signal_pending(current))
break;
if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
skb = pkt_dev->skb;
skb->protocol = eth_type_trans(skb, skb->dev);
- atomic_add(burst, &skb->users);
+ refcount_add(burst, &skb->users);
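/* one reference per netif_receive_skb() call in the burst below */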
local_bh_disable();
do {
ret = netif_receive_skb(skb);
pkt_dev->errors++;
pkt_dev->sofar++;
pkt_dev->seq_num++;
- if (atomic_read(&skb->users) != burst) {
+ if (refcount_read(&skb->users) != burst) {
/* skb was queued by rps/rfs or taps,
* so cannot reuse this skb
*/
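/* refcount_t has no plain subtract; sub_and_test() returning true
 * would mean the count unexpectedly hit zero, hence the WARN_ON.
 */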
- atomic_sub(burst - 1, &skb->users);
+ WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
/* get out of the loop and wait
* until skb is consumed
*/
goto out; /* Skips xmit_mode M_START_XMIT */
} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
local_bh_disable();
- atomic_inc(&pkt_dev->skb->users);
+ refcount_inc(&pkt_dev->skb->users);
ret = dev_queue_xmit(pkt_dev->skb);
switch (ret) {
pkt_dev->last_ok = 0;
goto unlock;
}
- atomic_add(burst, &pkt_dev->skb->users);
+ refcount_add(burst, &pkt_dev->skb->users);
xmit_more:
ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
/* fallthru */
case NETDEV_TX_BUSY:
/* Retry it next time */
- atomic_dec(&(pkt_dev->skb->users));
+ refcount_dec(&(pkt_dev->skb->users));
pkt_dev->last_ok = 0;
}
if (unlikely(burst))
- atomic_sub(burst, &pkt_dev->skb->users);
+ WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
unlock:
HARD_TX_UNLOCK(odev, txq);
NETLINK_CB(skb).dst_group = group;
if (echo)
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
if (echo)
err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->head = NULL;
skb->truesize = sizeof(struct sk_buff);
- atomic_set(&skb->users, 1);
+ refcount_set(&skb->users, 1);
skb->mac_header = (typeof(skb->mac_header))~0U;
out:
/* Account for allocated memory : skb + skb->head */
skb->truesize = SKB_TRUESIZE(size);
skb->pfmemalloc = pfmemalloc;
- atomic_set(&skb->users, 1);
+ refcount_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
- atomic_set(&skb->users, 1);
+ refcount_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
C(head_frag);
C(data);
C(truesize);
- atomic_set(&n->users, 1);
+ refcount_set(&n->users, 1);
atomic_inc(&(skb_shinfo(skb)->dataref));
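/* shinfo->dataref counts users of the shared data area; it is a
 * separate counter and remains a plain atomic_t.
 */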
skb->cloned = 1;
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
ireq->pktopts = skb;
}
ireq->ir_iif = sk->sk_bound_dev_if;
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
ireq->pktopts = skb;
}
np->rxopt.bits.rxinfo ||
np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
np->rxopt.bits.rxohlim || np->repflow)) {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
ireq->pktopts = skb;
}
}
sock_hold(sk);
if (*skb2 == NULL) {
- if (atomic_read(&skb->users) != 1) {
+ if (refcount_read(&skb->users) != 1) {
*skb2 = skb_clone(skb, allocation);
} else {
*skb2 = skb;
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
}
}
if (*skb2 != NULL) {
}
if (dst_group) {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
}
err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
struct netlink_sock *nlk;
int ret;
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
int exclude_portid = 0;
if (report) {
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
exclude_portid = portid;
}
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}
/*
const void *here = __builtin_return_address(0);
if (skb) {
int n = atomic_read(select_skb_count(op));
- trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}
}
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
skb_get(skb);
}
int n;
CHECK_SLAB_OKAY(&skb->users);
n = atomic_dec_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}
int n;
CHECK_SLAB_OKAY(&skb->users);
n = atomic_dec_return(select_skb_count(op));
- trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
+ trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}
while ((skb = skb_dequeue((list))) != NULL) {
int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
- atomic_read(&skb->users), n, here);
+ refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}
sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
"illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
- atomic_read(&chunk->skb->users) : -1);
+ refcount_read(&chunk->skb->users) : -1);
/* Add the chunk to the packet. */
status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
if (flags & MSG_PEEK) {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
- atomic_inc(&skb->users);
+ refcount_inc(&skb->users);
} else {
skb = __skb_dequeue(&sk->sk_receive_queue);
}