netfilter: nat: propagate errors from xfrm_me_harder()
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / net / netfilter / nf_conntrack_expect.c
CommitLineData
77ab9cff
MJ
1/* Expectation handling for nf_conntrack. */
2
3/* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/types.h>
13#include <linux/netfilter.h>
14#include <linux/skbuff.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/stddef.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/percpu.h>
21#include <linux/kernel.h>
a71c0855 22#include <linux/jhash.h>
d9b93842 23#include <linux/moduleparam.h>
bc3b2d7f 24#include <linux/export.h>
457c4cbc 25#include <net/net_namespace.h>
77ab9cff
MJ
26
27#include <net/netfilter/nf_conntrack.h>
28#include <net/netfilter/nf_conntrack_core.h>
29#include <net/netfilter/nf_conntrack_expect.h>
30#include <net/netfilter/nf_conntrack_helper.h>
31#include <net/netfilter/nf_conntrack_tuple.h>
5d0aa2cc 32#include <net/netfilter/nf_conntrack_zones.h>
77ab9cff 33
a71c0855
PM
34unsigned int nf_ct_expect_hsize __read_mostly;
35EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
36
f264a7df 37unsigned int nf_ct_expect_max __read_mostly;
a71c0855 38
e9c1b084 39static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
77ab9cff
MJ
40
41/* nf_conntrack_expect helper functions */
ebbf41df
PNA
/* Unlink an expectation from the global hash and from its master
 * conntrack's helper list, emit an IPEXP_DESTROY event and drop the
 * hash table's reference.
 * NOTE(review): the hash is modified without taking a lock here —
 * presumably callers hold nf_conntrack_lock; confirm at call sites.
 */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 pid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	/* Remove from the global expectation hash (RCU-safe removal,
	 * concurrent RCU readers may still see the entry briefly). */
	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	/* Remove from the master conntrack's per-helper list. */
	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
	nf_ct_expect_put(exp);	/* drop the hash table's reference */

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
77ab9cff 63
6823645d 64static void nf_ct_expectation_timed_out(unsigned long ul_expect)
77ab9cff
MJ
65{
66 struct nf_conntrack_expect *exp = (void *)ul_expect;
67
f8ba1aff 68 spin_lock_bh(&nf_conntrack_lock);
77ab9cff 69 nf_ct_unlink_expect(exp);
f8ba1aff 70 spin_unlock_bh(&nf_conntrack_lock);
6823645d 71 nf_ct_expect_put(exp);
77ab9cff
MJ
72}
73
a71c0855
PM
/* Hash an expectation by its destination tuple, returning a bucket
 * index in [0, nf_ct_expect_hsize). */
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Lazily seed the global hash random value on first use. */
	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	/* Multiply-shift maps the full 32-bit hash onto the table size
	 * without a modulo. */
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
87
77ab9cff 88struct nf_conntrack_expect *
5d0aa2cc
PM
89__nf_ct_expect_find(struct net *net, u16 zone,
90 const struct nf_conntrack_tuple *tuple)
77ab9cff
MJ
91{
92 struct nf_conntrack_expect *i;
a71c0855
PM
93 unsigned int h;
94
9b03f38d 95 if (!net->ct.expect_count)
a71c0855 96 return NULL;
77ab9cff 97
a71c0855 98 h = nf_ct_expect_dst_hash(tuple);
b67bfe0d 99 hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
5d0aa2cc
PM
100 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
101 nf_ct_zone(i->master) == zone)
77ab9cff
MJ
102 return i;
103 }
104 return NULL;
105}
6823645d 106EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
77ab9cff
MJ
107
/* Just find a expectation corresponding to a tuple.
 * Like __nf_ct_expect_find() but returns the entry with its use count
 * incremented; caller must drop it with nf_ct_expect_put(). */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	/* atomic_inc_not_zero() fails if the entry's last reference was
	 * dropped concurrently — treat it as not found in that case. */
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
77ab9cff
MJ
124
/* If an expectation for this connection is found, it gets delete from
 * global list then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		/* Inactive expectations are skipped entirely. */
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		/* Permanent expectations stay in the table; hand the
		 * caller a fresh reference instead of unlinking. */
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		/* We beat the timeout timer: unlink and transfer the
		 * timer's reference to the caller. */
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Timer already fired; the expectation is being torn down. */
	return NULL;
}
167
/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connection never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		/* Only unlink if we cancel the timer first; otherwise
		 * the expiry handler owns the teardown. */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
77ab9cff
MJ
187
188/* Would two expected things clash? */
189static inline int expect_clash(const struct nf_conntrack_expect *a,
190 const struct nf_conntrack_expect *b)
191{
192 /* Part covered by intersection of masks must be unequal,
193 otherwise they clash */
d4156e8c 194 struct nf_conntrack_tuple_mask intersect_mask;
77ab9cff
MJ
195 int count;
196
77ab9cff 197 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
77ab9cff
MJ
198
199 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
200 intersect_mask.src.u3.all[count] =
201 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
202 }
203
77ab9cff
MJ
204 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
205}
206
207static inline int expect_matches(const struct nf_conntrack_expect *a,
208 const struct nf_conntrack_expect *b)
209{
f64f9e71
JP
210 return a->master == b->master && a->class == b->class &&
211 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
5d0aa2cc
PM
212 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
213 nf_ct_zone(a->master) == nf_ct_zone(b->master);
77ab9cff
MJ
214}
215
/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	/* del_timer() succeeding means nobody else got to it first. */
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
77ab9cff
MJ
227
/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	/* GFP_ATOMIC: may be called from packet-processing context. */
	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);	/* the caller's reference */
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
77ab9cff 244
6002f266 245void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
76108cea 246 u_int8_t family,
1d9d7522
PM
247 const union nf_inet_addr *saddr,
248 const union nf_inet_addr *daddr,
249 u_int8_t proto, const __be16 *src, const __be16 *dst)
d6a9b650
PM
250{
251 int len;
252
253 if (family == AF_INET)
254 len = 4;
255 else
256 len = 16;
257
258 exp->flags = 0;
6002f266 259 exp->class = class;
d6a9b650
PM
260 exp->expectfn = NULL;
261 exp->helper = NULL;
262 exp->tuple.src.l3num = family;
263 exp->tuple.dst.protonum = proto;
d6a9b650
PM
264
265 if (saddr) {
266 memcpy(&exp->tuple.src.u3, saddr, len);
267 if (sizeof(exp->tuple.src.u3) > len)
268 /* address needs to be cleared for nf_ct_tuple_equal */
269 memset((void *)&exp->tuple.src.u3 + len, 0x00,
270 sizeof(exp->tuple.src.u3) - len);
271 memset(&exp->mask.src.u3, 0xFF, len);
272 if (sizeof(exp->mask.src.u3) > len)
273 memset((void *)&exp->mask.src.u3 + len, 0x00,
274 sizeof(exp->mask.src.u3) - len);
275 } else {
276 memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
277 memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
278 }
279
d6a9b650 280 if (src) {
a34c4589
AV
281 exp->tuple.src.u.all = *src;
282 exp->mask.src.u.all = htons(0xFFFF);
d6a9b650
PM
283 } else {
284 exp->tuple.src.u.all = 0;
285 exp->mask.src.u.all = 0;
286 }
287
d4156e8c
PM
288 memcpy(&exp->tuple.dst.u3, daddr, len);
289 if (sizeof(exp->tuple.dst.u3) > len)
290 /* address needs to be cleared for nf_ct_tuple_equal */
291 memset((void *)&exp->tuple.dst.u3 + len, 0x00,
292 sizeof(exp->tuple.dst.u3) - len);
293
a34c4589 294 exp->tuple.dst.u.all = *dst;
d6a9b650 295}
6823645d 296EXPORT_SYMBOL_GPL(nf_ct_expect_init);
d6a9b650 297
7d0742da
PM
298static void nf_ct_expect_free_rcu(struct rcu_head *head)
299{
300 struct nf_conntrack_expect *exp;
301
302 exp = container_of(head, struct nf_conntrack_expect, rcu);
303 kmem_cache_free(nf_ct_expect_cachep, exp);
304}
305
6823645d 306void nf_ct_expect_put(struct nf_conntrack_expect *exp)
77ab9cff
MJ
307{
308 if (atomic_dec_and_test(&exp->use))
7d0742da 309 call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
77ab9cff 310}
6823645d 311EXPORT_SYMBOL_GPL(nf_ct_expect_put);
77ab9cff 312
3d058d7b 313static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
77ab9cff
MJ
314{
315 struct nf_conn_help *master_help = nfct_help(exp->master);
3d058d7b 316 struct nf_conntrack_helper *helper;
9b03f38d 317 struct net *net = nf_ct_exp_net(exp);
a71c0855 318 unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
77ab9cff 319
3bfd45f9
ED
320 /* two references : one for hash insert, one for the timer */
321 atomic_add(2, &exp->use);
b560580a 322
3d058d7b
PNA
323 hlist_add_head(&exp->lnode, &master_help->expectations);
324 master_help->expecting[exp->class]++;
a71c0855 325
9b03f38d
AD
326 hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
327 net->ct.expect_count++;
77ab9cff 328
6823645d
PM
329 setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
330 (unsigned long)exp);
3d058d7b
PNA
331 helper = rcu_dereference_protected(master_help->helper,
332 lockdep_is_held(&nf_conntrack_lock));
333 if (helper) {
334 exp->timeout.expires = jiffies +
335 helper->expect_policy[exp->class].timeout * HZ;
bc01befd 336 }
77ab9cff
MJ
337 add_timer(&exp->timeout);
338
0d55af87 339 NF_CT_STAT_INC(net, expect_create);
3d058d7b 340 return 0;
77ab9cff
MJ
341}
342
343/* Race with expectations being used means we could have none to find; OK. */
6002f266
PM
344static void evict_oldest_expect(struct nf_conn *master,
345 struct nf_conntrack_expect *new)
77ab9cff 346{
b560580a 347 struct nf_conn_help *master_help = nfct_help(master);
6002f266 348 struct nf_conntrack_expect *exp, *last = NULL;
77ab9cff 349
b67bfe0d 350 hlist_for_each_entry(exp, &master_help->expectations, lnode) {
6002f266
PM
351 if (exp->class == new->class)
352 last = exp;
353 }
b560580a 354
6002f266
PM
355 if (last && del_timer(&last->timeout)) {
356 nf_ct_unlink_expect(last);
357 nf_ct_expect_put(last);
77ab9cff
MJ
358 }
359}
360
19abb7b0 361static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
77ab9cff 362{
6002f266 363 const struct nf_conntrack_expect_policy *p;
77ab9cff
MJ
364 struct nf_conntrack_expect *i;
365 struct nf_conn *master = expect->master;
366 struct nf_conn_help *master_help = nfct_help(master);
3d058d7b 367 struct nf_conntrack_helper *helper;
9b03f38d 368 struct net *net = nf_ct_exp_net(expect);
b67bfe0d 369 struct hlist_node *next;
a71c0855 370 unsigned int h;
83731671 371 int ret = 1;
77ab9cff 372
3d058d7b 373 if (!master_help) {
3c158f7f
PM
374 ret = -ESHUTDOWN;
375 goto out;
376 }
a71c0855 377 h = nf_ct_expect_dst_hash(&expect->tuple);
b67bfe0d 378 hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
77ab9cff 379 if (expect_matches(i, expect)) {
2614f864
PNA
380 if (del_timer(&i->timeout)) {
381 nf_ct_unlink_expect(i);
382 nf_ct_expect_put(i);
383 break;
77ab9cff
MJ
384 }
385 } else if (expect_clash(i, expect)) {
386 ret = -EBUSY;
387 goto out;
388 }
389 }
390 /* Will be over limit? */
3d058d7b
PNA
391 helper = rcu_dereference_protected(master_help->helper,
392 lockdep_is_held(&nf_conntrack_lock));
393 if (helper) {
394 p = &helper->expect_policy[expect->class];
bc01befd
PNA
395 if (p->max_expected &&
396 master_help->expecting[expect->class] >= p->max_expected) {
397 evict_oldest_expect(master, expect);
398 if (master_help->expecting[expect->class]
399 >= p->max_expected) {
400 ret = -EMFILE;
401 goto out;
402 }
6002f266
PM
403 }
404 }
77ab9cff 405
9b03f38d 406 if (net->ct.expect_count >= nf_ct_expect_max) {
e87cc472 407 net_warn_ratelimited("nf_conntrack: expectation table full\n");
f264a7df 408 ret = -EMFILE;
f264a7df 409 }
19abb7b0
PNA
410out:
411 return ret;
412}
413
83731671
PNA
/* Check and insert @expect under nf_conntrack_lock, then (outside the
 * lock) report an IPEXP_NEW event.  Returns 0 on success or a negative
 * errno from the check/insert steps. */
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 pid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	/* <= 0: error (or nothing to do); > 0 means proceed. */
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_lock);
	/* Event is reported after the lock is dropped. */
	nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
435
54b07dca 436#ifdef CONFIG_NF_CONNTRACK_PROCFS
5d08ad44 437struct ct_expect_iter_state {
dc5129f8 438 struct seq_net_private p;
5d08ad44
PM
439 unsigned int bucket;
440};
441
/* Return the first expectation in the hash, scanning buckets from 0;
 * leaves st->bucket at the bucket that was found (or the table end). */
static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}
77ab9cff 455
5d08ad44
PM
/* Advance to the next expectation, crossing into later buckets as the
 * current one runs out.  Returns NULL at the end of the table. */
static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}
470
5d08ad44 471static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
77ab9cff 472{
5d08ad44 473 struct hlist_node *head = ct_expect_get_first(seq);
77ab9cff 474
5d08ad44
PM
475 if (head)
476 while (pos && (head = ct_expect_get_next(seq, head)))
477 pos--;
478 return pos ? NULL : head;
479}
77ab9cff 480
5d08ad44 481static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
7d0742da 482 __acquires(RCU)
5d08ad44 483{
7d0742da 484 rcu_read_lock();
5d08ad44
PM
485 return ct_expect_get_idx(seq, *pos);
486}
77ab9cff 487
5d08ad44
PM
/* seq_file next: step the iterator forward one entry. */
static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}
493
5d08ad44 494static void exp_seq_stop(struct seq_file *seq, void *v)
7d0742da 495 __releases(RCU)
77ab9cff 496{
7d0742da 497 rcu_read_unlock();
77ab9cff
MJ
498}
499
/* Format one expectation as a line of /proc/net/nf_conntrack_expect:
 * remaining timeout, l3/l4 proto, tuple, flags, and helper name. */
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	/* Remaining lifetime in seconds, "-" if no timer was set up. */
	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
				       expect->tuple.dst.protonum));

	/* Flags are comma-separated; delim becomes "," once one has
	 * been printed. */
	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}
543
56b3d975 544static const struct seq_operations exp_seq_ops = {
77ab9cff
MJ
545 .start = exp_seq_start,
546 .next = exp_seq_next,
547 .stop = exp_seq_stop,
548 .show = exp_seq_show
549};
550
/* open() handler: attach the per-net seq_file iterator. */
static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			sizeof(struct ct_expect_iter_state));
}
556
5d08ad44 557static const struct file_operations exp_file_ops = {
77ab9cff
MJ
558 .owner = THIS_MODULE,
559 .open = exp_open,
560 .read = seq_read,
561 .llseek = seq_lseek,
dc5129f8 562 .release = seq_release_net,
77ab9cff 563};
54b07dca 564#endif /* CONFIG_NF_CONNTRACK_PROCFS */
e9c1b084 565
dc5129f8 566static int exp_proc_init(struct net *net)
e9c1b084 567{
54b07dca 568#ifdef CONFIG_NF_CONNTRACK_PROCFS
e9c1b084
PM
569 struct proc_dir_entry *proc;
570
d4beaa66
G
571 proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
572 &exp_file_ops);
e9c1b084
PM
573 if (!proc)
574 return -ENOMEM;
54b07dca 575#endif /* CONFIG_NF_CONNTRACK_PROCFS */
e9c1b084
PM
576 return 0;
577}
578
dc5129f8 579static void exp_proc_remove(struct net *net)
e9c1b084 580{
54b07dca 581#ifdef CONFIG_NF_CONNTRACK_PROCFS
ece31ffd 582 remove_proc_entry("nf_conntrack_expect", net->proc_net);
54b07dca 583#endif /* CONFIG_NF_CONNTRACK_PROCFS */
e9c1b084
PM
584}
585
13ccdfc2 586module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
a71c0855 587
83b4dbe1 588int nf_conntrack_expect_pernet_init(struct net *net)
e9c1b084 589{
a71c0855
PM
590 int err = -ENOMEM;
591
9b03f38d 592 net->ct.expect_count = 0;
d862a662 593 net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
9b03f38d 594 if (net->ct.expect_hash == NULL)
a71c0855 595 goto err1;
e9c1b084 596
dc5129f8 597 err = exp_proc_init(net);
e9c1b084 598 if (err < 0)
83b4dbe1 599 goto err2;
e9c1b084
PM
600
601 return 0;
12293bf9 602err2:
d862a662 603 nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
a71c0855 604err1:
e9c1b084
PM
605 return err;
606}
607
83b4dbe1 608void nf_conntrack_expect_pernet_fini(struct net *net)
e9c1b084 609{
dc5129f8 610 exp_proc_remove(net);
d862a662 611 nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
e9c1b084 612}
83b4dbe1
G
613
/* Global init: derive default hash size and max table size from the
 * conntrack table size (unless set via module parameter) and create
 * the expectation slab cache. */
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
				sizeof(struct nf_conntrack_expect),
				0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}
629
/* Global teardown: destroy the slab cache once all pending RCU frees
 * have completed. */
void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}