 struct flow_cache       flow_cache_global;
 atomic_t                flow_cache_genid;
 struct list_head        flow_cache_gc_list;
+atomic_t                flow_cache_gc_count;
 spinlock_t              flow_cache_gc_lock;
 struct work_struct      flow_cache_gc_work;
 struct work_struct      flow_cache_flush_work;

         list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
         spin_unlock_bh(&xfrm->flow_cache_gc_lock);

-        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                 flow_entry_kill(fce, xfrm);
+                atomic_dec(&xfrm->flow_cache_gc_count);
+                WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+        }
 }

 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                      int deleted, struct list_head *gc_list,
                                      struct netns_xfrm *xfrm)
 {
         if (deleted) {
+                atomic_add(deleted, &xfrm->flow_cache_gc_count);
                 fcp->hash_count -= deleted;
                 spin_lock_bh(&xfrm->flow_cache_gc_lock);
                 list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
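
The two hunks above are the accounting half of the change: flow_cache_queue_garbage() adds the number of entries it splices onto flow_cache_gc_list to the new counter, and flow_cache_gc_task() decrements it once per entry it frees, with the WARN_ON catching underflow. A minimal userspace sketch of that discipline, assuming C11 atomics in place of the kernel's atomic_t and using made-up names (pending_gc, queue_garbage, gc_task):

/* Sketch only -- not part of the patch. Models the GC accounting with
 * C11 atomics instead of the kernel's atomic_t; all names are made up. */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_gc;   /* entries queued for GC but not yet freed */

/* flow_cache_queue_garbage() analogue: count entries as they are queued. */
static void queue_garbage(int deleted)
{
        if (deleted)
                atomic_fetch_add(&pending_gc, deleted);
}

/* flow_cache_gc_task() analogue: free entries one by one, dropping the
 * count with each free and flagging underflow like the WARN_ON above. */
static void gc_task(int entries_on_list)
{
        for (int i = 0; i < entries_on_list; i++) {
                /* flow_entry_kill() would free the entry here */
                atomic_fetch_sub(&pending_gc, 1);
                assert(atomic_load(&pending_gc) >= 0);
        }
}

int main(void)
{
        queue_garbage(3);
        gc_task(3);
        printf("pending after drain: %d\n", atomic_load(&pending_gc));
        return 0;
}

Decrementing per freed entry, rather than once per batch, keeps the counter an honest picture of the backlog while the worker is still part-way through a long list.
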
         if (fcp->hash_count > fc->high_watermark)
                 flow_cache_shrink(fc, fcp);

+        if (fcp->hash_count > 2 * fc->high_watermark ||
+            atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+                atomic_inc(&net->xfrm.flow_cache_genid);
+                flo = ERR_PTR(-ENOBUFS);
+                goto ret_object;
+        }
+
         fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
         if (fle) {
                 fle->net = net;
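
This hunk is the enforcement half: before allocating a new cache entry, the lookup path now bails out with -ENOBUFS when the per-cpu hash has grown past twice the high watermark or the GC backlog counter has itself exceeded the watermark; bumping flow_cache_genid at the same point marks already-cached entries stale so they are revalidated or dropped on their next lookup. A rough userspace sketch of the refusal check, again assuming C11 atomics and made-up names (HIGH_WATERMARK, pending_gc, alloc_entry):

/* Sketch only -- not part of the patch. Shows a refusal check in an
 * allocation path; the threshold and all names are made up. */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define HIGH_WATERMARK 4096

static atomic_int pending_gc;           /* as in the previous sketch */

struct entry { int data; };

/* Refuse to grow the cache while too much garbage is pending, instead of
 * allocating and making the backlog worse. */
static struct entry *alloc_entry(int hash_count)
{
        if (hash_count > 2 * HIGH_WATERMARK ||
            atomic_load(&pending_gc) > HIGH_WATERMARK) {
                errno = ENOBUFS;        /* mirrors flo = ERR_PTR(-ENOBUFS) above */
                return NULL;
        }
        return calloc(1, sizeof(struct entry));
}

int main(void)
{
        struct entry *e = alloc_entry(0);
        if (!e)
                perror("alloc_entry");
        free(e);
        return 0;
}

The backlog counter is global to the namespace, so the refusal also kicks in when the pressure comes from other CPUs' garbage that has not yet been freed.
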
         INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
         INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
         mutex_init(&net->xfrm.flow_flush_sem);
+        atomic_set(&net->xfrm.flow_cache_gc_count, 0);
         fc->hash_shift = 10;
         fc->low_watermark = 2 * flow_cache_hash_size(fc);