#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
+#include <linux/refcount.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
	struct sk_buff		*skb_bad_txq;
	struct rcu_head		rcu_head;
	int			padded;
-	atomic_t		refcnt;
+	refcount_t		refcnt;
	spinlock_t		busylock ____cacheline_aligned_in_smp;
};
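The struct change above is what every following hunk keys off: each atomic_* operation on refcnt becomes the matching refcount_* call from <linux/refcount.h>. As a rough sketch of that pattern, using a hypothetical struct foo rather than Qdisc (none of the names below come from this patch):

/*
 * Sketch of the conversion pattern, not part of the patch: a hypothetical
 * refcounted object with the same get/put lifetime rules the Qdisc code uses.
 * refcount_t saturates instead of wrapping and warns on increment-from-zero,
 * which is what the conversion buys over a plain atomic_t counter.
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);	/* creator holds the first reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->refcnt);	/* WARNs and saturates if refcnt was already 0 */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))	/* true only on the last put */
		kfree(f);
}

refcount_read() is a plain read of the counter; it is what the tc_fill_qdisc and dev_graft_qdisc hunks below use where the old code used atomic_read().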
		old = dev_graft_qdisc(dev_queue, new);
		if (new && i > 0)
-			atomic_inc(&new->refcnt);
+			refcount_inc(&new->refcnt);
		if (!ingress)
			qdisc_destroy(old);
		notify_and_destroy(net, skb, n, classid,
				   dev->qdisc, new);
		if (new && !new->ops->attach)
-			atomic_inc(&new->refcnt);
+			refcount_inc(&new->refcnt);
		dev->qdisc = new ? : &noop_qdisc;
		if (new && new->ops->attach)
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
-				atomic_inc(&q->refcnt);
+				refcount_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
-	tcm->tcm_info = atomic_read(&q->refcnt);
+	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(dev);
-	atomic_set(&sch->refcnt, 1);
+	refcount_set(&sch->refcnt, 1);
	return sch;
errout:
	const struct Qdisc_ops *ops = qdisc->ops;
	if (qdisc->flags & TCQ_F_BUILTIN ||
-	    !atomic_dec_and_test(&qdisc->refcnt))
+	    !refcount_dec_and_test(&qdisc->refcnt))
		return;
#ifdef CONFIG_NET_SCHED
	spin_lock_bh(root_lock);
	/* Prune old scheduler */
-	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+	if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
		qdisc_reset(oqdisc);
	/* ... and graft new one */
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		dev->qdisc = txq->qdisc_sleeping;
-		atomic_inc(&dev->qdisc->refcnt);
+		refcount_inc(&dev->qdisc->refcnt);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
		if (qdisc) {