/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

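/* Timer callback: the expectation was never matched, so unlink it and
 * drop the timer's reference. */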
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}

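/* Hash an expectation by its destination tuple (address, protocol and
 * port) into a bucket of the per-netns expectation table. */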
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

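/* Find an expectation matching @tuple in @zone. No reference is taken;
 * callers must hold nf_conntrack_lock or the RCU read lock. */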
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is removed from
 * the global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If the master is not in the hash table yet (ie. the packet hasn't
	   left this machine yet), how can the other end know about the
	   expectation?  Hence these are not the droids you are looking for
	   (if the master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

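/* Fill in an expectation obtained from nf_ct_expect_alloc(): class,
 * family, addresses and ports. A NULL @saddr or @src acts as a wildcard
 * (zeroed mask). */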
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

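/* Link the expectation into its master's helper list and the global
 * hash table and start its timeout timer. Called with nf_conntrack_lock
 * held. */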
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references : one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

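/* Check whether @expect may be registered: remove an identical pending
 * expectation, reject clashes, and enforce the per-helper and global
 * expectation limits. */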
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

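/* Register an expectation for its master conntrack: check limits and
 * clashes, insert it and emit an IPEXP_NEW event. Returns a negative
 * errno on failure. */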
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

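/* Emit one expectation in /proc/net/nf_conntrack_expect format:
 * remaining timeout, protocols, tuple, flags and helper name. */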
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	int err = -ENOMEM;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	err = exp_proc_init(net);
	if (err < 0)
		goto err2;

	return 0;
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}

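/* Module init: size the expectation hash from the conntrack table,
 * derive the global limit and create the expectation slab cache. */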
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
						sizeof(struct nf_conntrack_expect),
						0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}