netfilter: ctnetlink: add support for user-space expectation helpers
net/netfilter/nf_conntrack_expect.c
/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

static unsigned int nf_ct_expect_hash_rnd __read_mostly;
unsigned int nf_ct_expect_max __read_mostly;
static int nf_ct_expect_hash_rnd_initted __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

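/* Expectations created from user space (ctnetlink) without a kernel-space
 * helper are not linked to any nf_conn_help; they are kept on this global
 * list instead, so they can be flushed when ctnetlink support goes away. */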
static HLIST_HEAD(nf_ct_userspace_expect_list);

/* nf_conntrack_expect helper functions */
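/* Unlink an expectation from the global hash and from its owner's list.
 * The caller is expected to hold nf_conntrack_lock and to have already
 * stopped the timeout timer; the reference held by the hash table is
 * dropped here. */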
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
                master_help->expecting[exp->class]--;

        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

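/* Hash the destination part of a tuple (address, port and protocol
 * numbers) with jhash2, then scale the result into the expectation hash
 * table with a multiply-shift instead of a modulo. */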
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
                get_random_bytes(&nf_ct_expect_hash_rnd,
                                 sizeof(nf_ct_expect_hash_rnd));
                nf_ct_expect_hash_rnd_initted = 1;
        }

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

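/* Lockless lookup: the caller must hold rcu_read_lock() and no reference
 * is taken on the returned expectation.  Use nf_ct_expect_find_get() below
 * if the expectation is needed beyond the RCU read-side section. */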
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone)
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple and grab a reference. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from the
 * global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
                    nf_ct_zone(i->master) == zone) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If master is not in hash table yet (ie. packet hasn't left
           this machine yet), how can other end know about expected?
           Hence these are not the droids you are looking for (if
           master ct never got confirmed, we'd hold a reference to it
           and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: the expectation may already have been
 * consumed by a matching connection. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
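
/* Typical call sequence for a kernel-space helper (an illustrative sketch,
 * not code from this file; "port" stands for the __be16 destination port
 * parsed from the payload): the helper allocates an expectation against
 * the master connection, describes the expected reply tuple and then
 * registers it:
 *
 *      exp = nf_ct_expect_alloc(ct);
 *      if (exp == NULL)
 *              return NF_DROP;
 *      nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *                        &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *                        &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *                        IPPROTO_TCP, NULL, &port);
 *      ret = nf_ct_expect_related(exp);
 *      nf_ct_expect_put(exp);
 */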

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

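/* Link a checked expectation into the global hash and into its owner's
 * list (the master's helper list, or the global user-space list for
 * NF_CT_EXPECT_USERSPACE entries without a helper), and arm the timeout
 * timer from the helper's expect_policy.  The two atomic_inc() calls take
 * one reference for the hash table and one for the pending timer. */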
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        atomic_inc(&exp->use);

        if (master_help) {
                hlist_add_head(&exp->lnode, &master_help->expectations);
                master_help->expecting[exp->class]++;
        } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
                hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        if (master_help) {
                p = &master_help->helper->expect_policy[exp->class];
                exp->timeout.expires = jiffies + p->timeout * HZ;
        }
        add_timer(&exp->timeout);

        atomic_inc(&exp->use);
        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);
        const struct nf_conntrack_expect_policy *p;

        if (!del_timer(&i->timeout))
                return 0;

        p = &master_help->helper->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
}

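/* Sanity-check a new expectation under nf_conntrack_lock.  Returns a
 * positive value if it should be inserted, 0 if an identical expectation
 * already existed and only had its timer refreshed, or a negative errno
 * on a clash or when a limit would be exceeded. */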
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n;
        unsigned int h;
        int ret = 1;

        /* Don't allow expectations created from kernel-space with no helper */
        if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
            (!master_help || !master_help->helper)) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore it. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will we be over the per-class limit? */
        if (master_help) {
                p = &master_help->helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                            >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

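/* Validate and insert an expectation, then report it to ctnetlink
 * listeners as an IPEXP_NEW event attributed to @pid.  In-kernel helpers
 * typically reach this through the nf_ct_expect_related() wrapper from
 * nf_conntrack_expect.h, which passes pid 0 and report 0. */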
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret <= 0)
                goto out;

        ret = 0;
        nf_ct_expect_insert(expect);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

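/* Flush all expectations that were created from user space without a
 * kernel-space helper, e.g. when ctnetlink support goes away, so they do
 * not linger in the expectation table. */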
void nf_ct_remove_userspace_expectations(void)
{
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        hlist_for_each_entry_safe(exp, n, next,
                                  &nf_ct_userspace_expect_list, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);

#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

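/* Emit one line per expectation in /proc/net/nf_conntrack_expect: the
 * remaining timeout in seconds (or "-" when no timer is set up), the
 * layer 3 and layer 4 protocol numbers, the expected tuple, any flags and
 * the name of the helper that created it.  An illustrative (not verbatim)
 * line for an FTP data expectation:
 *
 *      296 l3proto = 2 proto=6 src=192.0.2.10 dst=192.0.2.20 sport=0 dport=35123 ftp
 */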
static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name)
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner = THIS_MODULE,
        .open = exp_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

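/* Per-netns initialisation.  In the initial namespace the expectation hash
 * size defaults to the conntrack hash size / 256 (minimum one bucket)
 * unless overridden via the read-only expect_hashsize module parameter,
 * and the global expectation limit is set to four times the hash size. */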
int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = net->ct.htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
                                                    &net->ct.expect_vmalloc, 0);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
                             nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net)) {
                rcu_barrier(); /* Wait for call_rcu() before destroy */
                kmem_cache_destroy(nf_ct_expect_cachep);
        }
        nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
                             nf_ct_expect_hsize);
}