#include <linux/percpu_counter.h>
struct netns_frags {
- struct list_head lru_list;
- spinlock_t lru_lock;
-
/* The percpu_counter "mem" needs to be cacheline aligned.
 * mem.count must not share a cacheline with other writers.
 */
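
For clarity, a minimal sketch of the member this comment documents, assuming the stock ____cacheline_aligned_in_smp attribute (the struct's other members are elided):

	/* Tracks fragment memory usage per netns; writers of mem.count
	 * get the cacheline to themselves, per the comment above.
	 */
	struct percpu_counter mem ____cacheline_aligned_in_smp;
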
struct inet_frag_queue {
spinlock_t lock;
struct timer_list timer; /* when will this queue expire? */
- struct list_head lru_list; /* lru list member */
struct hlist_node list;
atomic_t refcnt;
struct sk_buff *fragments; /* list of received fragments */
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q,
- struct inet_frags *f, int *work);
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
struct inet_frags *f, void *key, unsigned int hash)
__releases(&f->lock);
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
if (atomic_dec_and_test(&q->refcnt))
- inet_frag_destroy(q, f, NULL);
+ inet_frag_destroy(q, f);
}
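
For context, a loose sketch of how a protocol caller pairs the lookup with the put above, modeled on the IPv4 reassembly path (the iph, arg, and hash setup are abbreviated). Note the __releases(&f->lock) annotation on inet_frag_find(): the caller takes the read lock and the lookup drops it:

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (q) {
		/* ... queue the fragment under q->lock ... */
		inet_frag_put(q, &ip4_frags);	/* last ref frees the queue */
	}
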
/* Memory Tracking Functions. */
return res;
}
-static inline void inet_frag_lru_move(struct inet_frag_queue *q)
-{
- spin_lock(&q->net->lru_lock);
- if (!list_empty(&q->lru_list))
- list_move_tail(&q->lru_list, &q->net->lru_list);
- spin_unlock(&q->net->lru_lock);
-}
-
-static inline void inet_frag_lru_del(struct inet_frag_queue *q)
-{
- spin_lock(&q->net->lru_lock);
- list_del_init(&q->lru_list);
- spin_unlock(&q->net->lru_lock);
-}
-
-static inline void inet_frag_lru_add(struct netns_frags *nf,
- struct inet_frag_queue *q)
-{
- spin_lock(&nf->lru_lock);
- list_add_tail(&q->lru_list, &nf->lru_list);
- spin_unlock(&nf->lru_lock);
-}
-
/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
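
To make the OR-accumulation concrete, a sketch of the bits and the helper the comment refers to (shapes as in this header; INET_ECN_MASK comes from net/inet_ecn.h):

	#define IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
	#define IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
	#define IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
	#define IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

	/* Map a fragment's TOS byte to one of the bits above; reassembly
	 * then ORs these into ipq->ecn and rejects invalid combinations.
	 */
	static inline u8 ip4_frag_ecn(u8 tos)
	{
		return 1 << (tos & INET_ECN_MASK);
	}
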
void inet_frags_init_net(struct netns_frags *nf)
{
init_frag_mem_limit(nf);
- INIT_LIST_HEAD(&nf->lru_list);
- spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);
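
For reference, a hedged sketch of what init_frag_mem_limit() amounts to here, assuming the two-argument percpu_counter_init() of this era (a GFP argument was added later):

	static inline void init_frag_mem_limit(struct netns_frags *nf)
	{
		percpu_counter_init(&nf->mem, 0);	/* start the mem counter at 0 */
	}
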
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
- inet_frag_lru_del(fq);
}
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
kfree_skb(skb);
}
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
- int *work)
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
struct sk_buff *fp;
struct netns_frags *nf;
fp = xp;
}
sum = sum_truesize + f->qsize;
- if (work)
- *work -= sum;
sub_frag_mem_limit(q, sum);
if (f->destructor)
f->destructor(q);
kfree(q);
-
}
EXPORT_SYMBOL(inet_frag_destroy);
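
With the work counter gone, the expected teardown sequence is simply kill-then-put; a loose sketch with IPv4 names for illustration:

	inet_frag_kill(&qp->q, &ip4_frags);	/* unhash and stop the timer */
	inet_frag_put(&qp->q, &ip4_frags);	/* last ref calls inet_frag_destroy() */
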
atomic_inc(&qp->refcnt);
hlist_add_head(&qp->list, &hb->chain);
- inet_frag_lru_add(nf, qp);
+
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
atomic_set(&q->refcnt, 1);
- INIT_LIST_HEAD(&q->lru_list);
return q;
}
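
Finally, since the constructor arms the timer with f->frag_expire, here is a loose sketch of the callback shape it assumes (old-style timer API: the queue pointer travels through the unsigned long argument; ip_expire() in the IPv4 path is the in-tree model, details elided):

	static void ip_expire(unsigned long arg)
	{
		struct ipq *qp = container_of((struct inet_frag_queue *)arg,
					      struct ipq, q);

		spin_lock(&qp->q.lock);
		inet_frag_kill(&qp->q, &ip4_frags);	/* unhash, disarm timer */
		/* ... ICMP_TIME_EXCEEDED handling elided ... */
		spin_unlock(&qp->q.lock);
		inet_frag_put(&qp->q, &ip4_frags);	/* drop the timer's reference */
	}
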