netfilter: nf_ct_helper: allocate 16 bytes for the helper and policy names
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / net / netfilter / nf_conntrack_netlink.c
1 /* Connection tracking via netlink socket. Allows for user space
2 * protocol helpers and general trouble making from userspace.
3 *
4 * (C) 2001 by Jay Schulist <jschlst@samba.org>
5 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
6 * (C) 2003 by Patrick Mchardy <kaber@trash.net>
7 * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
8 *
9 * Initial connection tracking via netlink development funded and
10 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
11 *
12 * Further development of this code funded by Astaro AG (http://www.astaro.com)
13 *
14 * This software may be used and distributed according to the terms
15 * of the GNU General Public License, incorporated herein by reference.
16 */
17
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/rculist.h>
22 #include <linux/rculist_nulls.h>
23 #include <linux/types.h>
24 #include <linux/timer.h>
25 #include <linux/security.h>
26 #include <linux/skbuff.h>
27 #include <linux/errno.h>
28 #include <linux/netlink.h>
29 #include <linux/spinlock.h>
30 #include <linux/interrupt.h>
31 #include <linux/slab.h>
32
33 #include <linux/netfilter.h>
34 #include <net/netlink.h>
35 #include <net/sock.h>
36 #include <net/netfilter/nf_conntrack.h>
37 #include <net/netfilter/nf_conntrack_core.h>
38 #include <net/netfilter/nf_conntrack_expect.h>
39 #include <net/netfilter/nf_conntrack_helper.h>
40 #include <net/netfilter/nf_conntrack_l3proto.h>
41 #include <net/netfilter/nf_conntrack_l4proto.h>
42 #include <net/netfilter/nf_conntrack_tuple.h>
43 #include <net/netfilter/nf_conntrack_acct.h>
44 #include <net/netfilter/nf_conntrack_zones.h>
45 #include <net/netfilter/nf_conntrack_timestamp.h>
46 #ifdef CONFIG_NF_NAT_NEEDED
47 #include <net/netfilter/nf_nat_core.h>
48 #include <net/netfilter/nf_nat_protocol.h>
49 #endif
50
51 #include <linux/netfilter/nfnetlink.h>
52 #include <linux/netfilter/nfnetlink_conntrack.h>
53
MODULE_LICENSE("GPL");

/* Module version string, printed at init time; memory reclaimed after boot. */
static char __initdata version[] = "0.93";
57
58 static inline int
59 ctnetlink_dump_tuples_proto(struct sk_buff *skb,
60 const struct nf_conntrack_tuple *tuple,
61 struct nf_conntrack_l4proto *l4proto)
62 {
63 int ret = 0;
64 struct nlattr *nest_parms;
65
66 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
67 if (!nest_parms)
68 goto nla_put_failure;
69 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
70 goto nla_put_failure;
71
72 if (likely(l4proto->tuple_to_nlattr))
73 ret = l4proto->tuple_to_nlattr(skb, tuple);
74
75 nla_nest_end(skb, nest_parms);
76
77 return ret;
78
79 nla_put_failure:
80 return -1;
81 }
82
83 static inline int
84 ctnetlink_dump_tuples_ip(struct sk_buff *skb,
85 const struct nf_conntrack_tuple *tuple,
86 struct nf_conntrack_l3proto *l3proto)
87 {
88 int ret = 0;
89 struct nlattr *nest_parms;
90
91 nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
92 if (!nest_parms)
93 goto nla_put_failure;
94
95 if (likely(l3proto->tuple_to_nlattr))
96 ret = l3proto->tuple_to_nlattr(skb, tuple);
97
98 nla_nest_end(skb, nest_parms);
99
100 return ret;
101
102 nla_put_failure:
103 return -1;
104 }
105
/* Dump both the layer-3 and layer-4 parts of @tuple into @skb.
 * The l3/l4 protocol handlers are looked up under rcu_read_lock()
 * because protocol modules may be unregistered concurrently.
 */
static int
ctnetlink_dump_tuples(struct sk_buff *skb,
		      const struct nf_conntrack_tuple *tuple)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);

	/* Only attempt the l4 part if the l3 part fit into the skb. */
	if (ret >= 0) {
		l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
					       tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
	}
	rcu_read_unlock();
	return ret;
}
126
127 static inline int
128 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
129 {
130 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
131 goto nla_put_failure;
132 return 0;
133
134 nla_put_failure:
135 return -1;
136 }
137
138 static inline int
139 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
140 {
141 long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
142
143 if (timeout < 0)
144 timeout = 0;
145
146 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
147 goto nla_put_failure;
148 return 0;
149
150 nla_put_failure:
151 return -1;
152 }
153
/* Dump protocol-private connection state (e.g. TCP state machine data)
 * as a nested CTA_PROTOINFO attribute. Returns 0 if the l4 protocol has
 * nothing to report, -1 on nlattr space exhaustion.
 * NOTE(review): assumes caller holds rcu_read_lock() for the l4proto
 * lookup — confirm against call sites.
 */
static inline int
ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}
178
/* Dump the helper attached to @ct as a nested CTA_HELP attribute
 * (helper name plus optional helper-private attributes).
 * A conntrack without help extension or without an assigned helper
 * dumps nothing and succeeds. Requires rcu_read_lock() held by the
 * caller for the help->helper dereference.
 */
static inline int
ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
	if (!nest_helper)
		goto nla_put_failure;
	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
		goto nla_put_failure;

	/* NOTE(review): to_nlattr's return value is ignored here, so an
	 * overflow inside the helper callback is not reported upward.
	 */
	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	return 0;

nla_put_failure:
	return -1;
}
209
210 static int
211 dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
212 enum ip_conntrack_dir dir)
213 {
214 enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
215 struct nlattr *nest_count;
216
217 nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
218 if (!nest_count)
219 goto nla_put_failure;
220
221 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
222 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
223 goto nla_put_failure;
224
225 nla_nest_end(skb, nest_count);
226
227 return 0;
228
229 nla_put_failure:
230 return -1;
231 }
232
233 static int
234 ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
235 enum ip_conntrack_dir dir, int type)
236 {
237 struct nf_conn_counter *acct;
238 u64 pkts, bytes;
239
240 acct = nf_conn_acct_find(ct);
241 if (!acct)
242 return 0;
243
244 if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
245 pkts = atomic64_xchg(&acct[dir].packets, 0);
246 bytes = atomic64_xchg(&acct[dir].bytes, 0);
247 } else {
248 pkts = atomic64_read(&acct[dir].packets);
249 bytes = atomic64_read(&acct[dir].bytes);
250 }
251 return dump_counters(skb, pkts, bytes, dir);
252 }
253
254 static int
255 ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
256 {
257 struct nlattr *nest_count;
258 const struct nf_conn_tstamp *tstamp;
259
260 tstamp = nf_conn_tstamp_find(ct);
261 if (!tstamp)
262 return 0;
263
264 nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
265 if (!nest_count)
266 goto nla_put_failure;
267
268 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
269 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
270 cpu_to_be64(tstamp->stop))))
271 goto nla_put_failure;
272 nla_nest_end(skb, nest_count);
273
274 return 0;
275
276 nla_put_failure:
277 return -1;
278 }
279
#ifdef CONFIG_NF_CONNTRACK_MARK
/* Export the firewall mark as a 32-bit big-endian CTA_MARK attribute.
 * Returns 0 on success, -1 if the skb ran out of room.
 */
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	return nla_put_be32(skb, CTA_MARK, htonl(ct->mark)) ? -1 : 0;
}
#else
#define ctnetlink_dump_mark(a, b) (0)
#endif
294
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Dump the LSM security context string of @ct as a nested CTA_SECCTX
 * attribute. If the secmark cannot be translated, nothing is dumped and
 * the function succeeds. Returns 0 on success, -1 on nlattr overflow.
 */
static inline int
ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	int len, ret;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX | NLA_F_NESTED);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
	/* Intentional fall-through: the context string must be released
	 * on both the success and the failure path.
	 */
nla_put_failure:
	security_release_secctx(secctx, len);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif
324
/* Original-direction tuple of the master connection of an expectation. */
#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)

/* For expected (related) connections, dump the master connection's
 * original tuple as a nested CTA_TUPLE_MASTER attribute. Non-expected
 * connections dump nothing and succeed.
 */
static inline int
ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
347
348 #ifdef CONFIG_NF_NAT_NEEDED
349 static int
350 dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
351 {
352 struct nlattr *nest_parms;
353
354 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
355 if (!nest_parms)
356 goto nla_put_failure;
357
358 if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
359 htonl(natseq->correction_pos)) ||
360 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
361 htonl(natseq->offset_before)) ||
362 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
363 htonl(natseq->offset_after)))
364 goto nla_put_failure;
365
366 nla_nest_end(skb, nest_parms);
367
368 return 0;
369
370 nla_put_failure:
371 return -1;
372 }
373
374 static inline int
375 ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
376 {
377 struct nf_nat_seq *natseq;
378 struct nf_conn_nat *nat = nfct_nat(ct);
379
380 if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
381 return 0;
382
383 natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
384 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
385 return -1;
386
387 natseq = &nat->seq[IP_CT_DIR_REPLY];
388 if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
389 return -1;
390
391 return 0;
392 }
393 #else
394 #define ctnetlink_dump_nat_seq_adj(a, b) (0)
395 #endif
396
397 static inline int
398 ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
399 {
400 if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
401 goto nla_put_failure;
402 return 0;
403
404 nla_put_failure:
405 return -1;
406 }
407
408 static inline int
409 ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
410 {
411 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
412 goto nla_put_failure;
413 return 0;
414
415 nla_put_failure:
416 return -1;
417 }
418
/* Build a complete IPCTNL_MSG_CT_NEW message describing @ct into @skb:
 * both tuples, zone, status, timeout, counters, timestamps, protocol
 * info, helper, mark, secctx, id, use count, master tuple and NAT
 * sequence adjustments. Used by the dump path and GET requests.
 * Returns skb->len on success, -1 on overflow (message cancelled).
 * NOTE(review): assumes rcu_read_lock() is held by the caller for the
 * helper/proto dereferences — confirm against call sites.
 */
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
		    struct nf_conn *ct)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	/* NLM_F_MULTI marks messages that are part of a dump sequence. */
	unsigned int flags = pid ? NLM_F_MULTI : 0, event;

	event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_NEW);
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version      = NFNETLINK_V0;
	nfmsg->res_id	    = 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	/* Zone 0 (the default) is not reported explicitly. */
	if (nf_ct_zone(ct) &&
	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_timeout(skb, ct) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL, type) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY, type) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0 ||
	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}
479
480 #ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Worst-case attribute payload contributed by the l3 and l4 protocol
 * handlers for event messages about @ct.
 */
static inline size_t
ctnetlink_proto_size(const struct nf_conn *ct)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	size_t len = 0;

	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	len += l3proto->nla_size;

	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	len += l4proto->nla_size;
	rcu_read_unlock();

	return len;
}
498
/* Size of the accounting attributes for both directions, or 0 if the
 * conntrack has no accounting extension.
 */
static inline size_t
ctnetlink_counters_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}
509
/* Size of the CTA_SECCTX attribute for @ct, or 0 when the secmark
 * cannot be translated or SECMARK support is compiled out. Passing a
 * NULL buffer to security_secid_to_secctx() only queries the length.
 */
static inline int
ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int len, ret;

	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
	if (ret)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}
526
/* Size of the CTA_TIMESTAMP attribute (start + stop), or 0 when the
 * conntrack lacks the timestamp extension or support is compiled out.
 */
static inline size_t
ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
#else
	return 0;
#endif
}
538
/* Upper bound on the size of an event message for @ct, used to size
 * the skb allocated in ctnetlink_conntrack_event().
 */
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_counters_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
	       + ctnetlink_proto_size(ct)
	       ;
}
566
/* Conntrack event callback: translate an in-kernel conntrack event
 * (new/update/destroy) into a netlink message and multicast it to the
 * matching NFNLGRP_CONNTRACK_* group. Runs in atomic context.
 * Returns 0 on success (or when nothing needs to be sent), -ENOBUFS
 * when delivery failed and the event should be redelivered later.
 */
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct net *net;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (nf_ct_is_untracked(ct))
		return 0;

	/* Map the event bits to a message type and multicast group;
	 * DESTROY takes precedence, then NEW/RELATED, then updates.
	 */
	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else  if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else  if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	/* Skip building the message when nobody would receive it. */
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version	= NFNETLINK_V0;
	nfmsg->res_id	= 0;

	rcu_read_lock();
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (nf_ct_zone(ct) &&
	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		/* Final counters/timestamps for the dying connection. */
		if (ctnetlink_dump_counters(skb, ct,
					    IP_CT_DIR_ORIGINAL, type) < 0 ||
		    ctnetlink_dump_counters(skb, ct,
					    IP_CT_DIR_REPLY, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_NATSEQADJ) &&
		    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->pid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	/* Tell listeners they lost an event so they can resynchronize. */
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
700 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
701
702 static int ctnetlink_done(struct netlink_callback *cb)
703 {
704 if (cb->args[1])
705 nf_ct_put((struct nf_conn *)cb->args[1]);
706 if (cb->data)
707 kfree(cb->data);
708 return 0;
709 }
710
/* Optional per-dump filter, attached to netlink_callback->data: only
 * conntrack entries with (ct->mark & mask) == val are reported.
 */
struct ctnetlink_dump_filter {
	struct {
		u_int32_t val;
		u_int32_t mask;
	} mark;
};
717
/* Netlink dump callback: walk the conntrack hash table and emit one
 * message per original-direction entry, optionally restricted by l3
 * protocol family and by the mark filter in cb->data.
 * Dump state lives in cb->args[]: args[0] is the current bucket,
 * args[1] holds a referenced conntrack when the previous skb filled up
 * mid-bucket, so the walk can resume exactly where it stopped.
 */
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;
	int res;
#ifdef CONFIG_NF_CONNTRACK_MARK
	const struct ctnetlink_dump_filter *filter = cb->data;
#endif

	spin_lock_bh(&nf_conntrack_lock);
	last = (struct nf_conn *)cb->args[1];
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
					   hnnode) {
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, ie. l3proto == 0,
			 * then dump everything. */
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				continue;
			/* Skip forward to the entry we stopped at. */
			if (cb->args[1]) {
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
#ifdef CONFIG_NF_CONNTRACK_MARK
			if (filter && !((ct->mark & filter->mark.mask) ==
					filter->mark.val)) {
				continue;
			}
#endif
			rcu_read_lock();
			res =
			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq,
					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
					    ct);
			rcu_read_unlock();
			if (res < 0) {
				/* skb full: keep a reference so the entry
				 * survives until the dump resumes. */
				nf_conntrack_get(&ct->ct_general);
				cb->args[1] = (unsigned long)ct;
				goto out;
			}
		}
		/* Resume target vanished: rescan this bucket from the top. */
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	spin_unlock_bh(&nf_conntrack_lock);
	if (last)
		nf_ct_put(last);

	return skb->len;
}
782
783 static inline int
784 ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
785 {
786 struct nlattr *tb[CTA_IP_MAX+1];
787 struct nf_conntrack_l3proto *l3proto;
788 int ret = 0;
789
790 nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
791
792 rcu_read_lock();
793 l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
794
795 if (likely(l3proto->nlattr_to_tuple)) {
796 ret = nla_validate_nested(attr, CTA_IP_MAX,
797 l3proto->nla_policy);
798 if (ret == 0)
799 ret = l3proto->nlattr_to_tuple(tb, tuple);
800 }
801
802 rcu_read_unlock();
803
804 return ret;
805 }
806
/* Attribute policy for the nested CTA_TUPLE_PROTO attributes. */
static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
};
810
/* Parse the nested CTA_TUPLE_PROTO attribute into the l4 part of
 * @tuple. CTA_PROTO_NUM is mandatory; protocol-specific fields are
 * validated and extracted by the l4 protocol handler.
 * Returns 0 on success or a negative errno.
 */
static inline int
ctnetlink_parse_tuple_proto(struct nlattr *attr,
			    struct nf_conntrack_tuple *tuple)
{
	struct nlattr *tb[CTA_PROTO_MAX+1];
	struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
	if (ret < 0)
		return ret;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;
	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested(attr, CTA_PROTO_MAX,
					  l4proto->nla_policy);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple);
	}

	rcu_read_unlock();

	return ret;
}
841
/* Attribute policy for the nested CTA_TUPLE_ORIG/REPLY/MASTER contents. */
static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
};
846
847 static int
848 ctnetlink_parse_tuple(const struct nlattr * const cda[],
849 struct nf_conntrack_tuple *tuple,
850 enum ctattr_type type, u_int8_t l3num)
851 {
852 struct nlattr *tb[CTA_TUPLE_MAX+1];
853 int err;
854
855 memset(tuple, 0, sizeof(*tuple));
856
857 nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
858
859 if (!tb[CTA_TUPLE_IP])
860 return -EINVAL;
861
862 tuple->src.l3num = l3num;
863
864 err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
865 if (err < 0)
866 return err;
867
868 if (!tb[CTA_TUPLE_PROTO])
869 return -EINVAL;
870
871 err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
872 if (err < 0)
873 return err;
874
875 /* orig and expect tuples get DIR_ORIGINAL */
876 if (type == CTA_TUPLE_REPLY)
877 tuple->dst.dir = IP_CT_DIR_REPLY;
878 else
879 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
880
881 return 0;
882 }
883
/* Extract the conntrack zone id from an optional CTA_ZONE attribute.
 * Without zone support compiled in, an explicit zone request is
 * rejected with -EOPNOTSUPP; an absent attribute always means zone 0.
 * Note the #ifdef sits inside the if/else: the return statement
 * replaces the assignment branch when zones are disabled.
 */
static int
ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
{
	if (attr)
#ifdef CONFIG_NF_CONNTRACK_ZONES
		*zone = ntohs(nla_get_be16(attr));
#else
		return -EOPNOTSUPP;
#endif
	else
		*zone = 0;

	return 0;
}
898
/* Attribute policy for the nested CTA_HELP attribute. */
static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING },
};
902
903 static inline int
904 ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
905 {
906 struct nlattr *tb[CTA_HELP_MAX+1];
907
908 nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
909
910 if (!tb[CTA_HELP_NAME])
911 return -EINVAL;
912
913 *helper_name = nla_data(tb[CTA_HELP_NAME]);
914
915 return 0;
916 }
917
/* Attribute policy for top-level conntrack attributes (CTA_*). */
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG]	= { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY]	= { .type = NLA_NESTED },
	[CTA_STATUS] 		= { .type = NLA_U32 },
	[CTA_PROTOINFO]		= { .type = NLA_NESTED },
	[CTA_HELP]		= { .type = NLA_NESTED },
	[CTA_NAT_SRC]		= { .type = NLA_NESTED },
	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
	[CTA_NAT_DST]		= { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER]	= { .type = NLA_NESTED },
	[CTA_ZONE]		= { .type = NLA_U16 },
	[CTA_MARK_MASK]		= { .type = NLA_U32 },
};
933
/* IPCTNL_MSG_CT_DELETE handler: delete the conntrack matching the given
 * tuple (orig or reply), optionally double-checked against CTA_ID.
 * With no tuple attribute at all, flush the entire table.
 * Returns 0 on success or a negative errno.
 */
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush_report(net,
					  NETLINK_CB(skb).pid,
					  nlmsg_report(nlh));
		return 0;
	}

	if (err < 0)
		return err;

	/* Takes a reference on the conntrack; dropped below. */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* The id is the conntrack's kernel address truncated to 32 bit;
	 * reject if it doesn't match the entry we found (stale handle).
	 */
	if (cda[CTA_ID]) {
		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	/* Only destroy if we win the race against the timer firing. */
	if (del_timer(&ct->timeout)) {
		if (nf_conntrack_event_report(IPCT_DESTROY, ct,
					      NETLINK_CB(skb).pid,
					      nlmsg_report(nlh)) < 0) {
			nf_ct_delete_from_lists(ct);
			/* we failed to report the event, try later */
			nf_ct_insert_dying_list(ct);
			nf_ct_put(ct);
			return 0;
		}
		/* death_by_timeout would report the event again */
		set_bit(IPS_DYING_BIT, &ct->status);
		nf_ct_delete_from_lists(ct);
		nf_ct_put(ct);
	}
	nf_ct_put(ct);

	return 0;
}
1000
/* IPCTNL_MSG_CT_GET handler: with NLM_F_DUMP, start a table dump
 * (optionally installing a mark filter); otherwise look up a single
 * conntrack by tuple and unicast one message back to the requester.
 * Returns 0 on success or a negative errno.
 */
static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct sk_buff *skb2 = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_table,
			.done = ctnetlink_done,
		};
#ifdef CONFIG_NF_CONNTRACK_MARK
		if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
			struct ctnetlink_dump_filter *filter;

			/* Freed by ctnetlink_done() via cb->data. */
			filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
					 GFP_ATOMIC);
			if (filter == NULL)
				return -ENOMEM;

			filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
			filter->mark.mask =
				ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
			c.data = filter;
		}
#endif
		return netlink_dump_start(ctnl, skb, nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	/* Takes a reference on the conntrack; dropped after fill. */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	rcu_read_lock();
	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(nlh->nlmsg_type), ct);
	rcu_read_unlock();
	nf_ct_put(ct);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}
1086
#ifdef CONFIG_NF_NAT_NEEDED
/* Hand a CTA_NAT_SRC/DST attribute to the NAT module's parser hook,
 * auto-loading the NAT module on demand. Caller holds rcu_read_lock()
 * and the nfnl mutex; both must be dropped around request_module()
 * because module loading may sleep. After a successful load the caller
 * is told to retry (-EAGAIN) so the hook is picked up cleanly.
 */
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock();
		if (request_module("nf-nat-ipv4") < 0) {
			nfnl_lock();
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock();
		rcu_read_lock();
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	return parse_nat_setup(ct, manip, attr);
}
#endif
1116
1117 static int
1118 ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
1119 {
1120 unsigned long d;
1121 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
1122 d = ct->status ^ status;
1123
1124 if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
1125 /* unchangeable */
1126 return -EBUSY;
1127
1128 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
1129 /* SEEN_REPLY bit can only be set */
1130 return -EBUSY;
1131
1132 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
1133 /* ASSURED bit can only be set */
1134 return -EBUSY;
1135
1136 /* Be careful here, modifying NAT bits can screw up things,
1137 * so don't let users modify them directly if they don't pass
1138 * nf_nat_range. */
1139 ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
1140 return 0;
1141 }
1142
1143 static int
1144 ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
1145 {
1146 #ifdef CONFIG_NF_NAT_NEEDED
1147 int ret;
1148
1149 if (cda[CTA_NAT_DST]) {
1150 ret = ctnetlink_parse_nat_setup(ct,
1151 NF_NAT_MANIP_DST,
1152 cda[CTA_NAT_DST]);
1153 if (ret < 0)
1154 return ret;
1155 }
1156 if (cda[CTA_NAT_SRC]) {
1157 ret = ctnetlink_parse_nat_setup(ct,
1158 NF_NAT_MANIP_SRC,
1159 cda[CTA_NAT_SRC]);
1160 if (ret < 0)
1161 return ret;
1162 }
1163 return 0;
1164 #else
1165 return -EOPNOTSUPP;
1166 #endif
1167 }
1168
/* Change (or detach, on an empty name) the helper of an existing
 * conntrack, auto-loading the helper module if needed. Called with
 * nf_conntrack_lock held; the lock is dropped around request_module()
 * because module loading may sleep, and the caller is asked to retry
 * (-EAGAIN) after a successful load.
 * Returns 0 on success or a negative errno.
 */
static inline int
ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EBUSY;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
	if (err < 0)
		return err;

	/* An empty helper name means "detach the current helper". */
	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}

		return 0;
	}

	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		spin_unlock_bh(&nf_conntrack_lock);

		if (request_module("nfct-helper-%s", helpname) < 0) {
			spin_lock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}

		spin_lock_bh(&nf_conntrack_lock);
		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper)
			return 0;
		if (help->helper)
			return -EBUSY;
		/* need to zero data of old helper */
		memset(&help->help, 0, sizeof(help->help));
	} else {
		/* we cannot set a helper for an existing conntrack */
		return -EOPNOTSUPP;
	}

	rcu_assign_pointer(help->helper, helper);

	return 0;
}
1231
1232 static inline int
1233 ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
1234 {
1235 u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
1236
1237 if (!del_timer(&ct->timeout))
1238 return -ETIME;
1239
1240 ct->timeout.expires = jiffies + timeout * HZ;
1241 add_timer(&ct->timeout);
1242
1243 return 0;
1244 }
1245
/* Netlink policy for the nested CTA_PROTOINFO container; the inner
 * per-protocol payload is validated by each l4proto's ->from_nlattr. */
static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_DCCP]	= { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP]	= { .type = NLA_NESTED },
};
1251
1252 static inline int
1253 ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
1254 {
1255 const struct nlattr *attr = cda[CTA_PROTOINFO];
1256 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
1257 struct nf_conntrack_l4proto *l4proto;
1258 int err = 0;
1259
1260 nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
1261
1262 rcu_read_lock();
1263 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1264 if (l4proto->from_nlattr)
1265 err = l4proto->from_nlattr(tb, ct);
1266 rcu_read_unlock();
1267
1268 return err;
1269 }
1270
1271 #ifdef CONFIG_NF_NAT_NEEDED
/* Policy for the nested CTA_NAT_SEQ_ADJ_{ORIG,REPLY} attributes; all
 * three fields are 32-bit and mandatory (enforced in change_nat_seq_adj). */
static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
	[CTA_NAT_SEQ_CORRECTION_POS]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_BEFORE]	= { .type = NLA_U32 },
	[CTA_NAT_SEQ_OFFSET_AFTER]	= { .type = NLA_U32 },
};
1277
1278 static inline int
1279 change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
1280 {
1281 struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
1282
1283 nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
1284
1285 if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
1286 return -EINVAL;
1287
1288 natseq->correction_pos =
1289 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
1290
1291 if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
1292 return -EINVAL;
1293
1294 natseq->offset_before =
1295 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
1296
1297 if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
1298 return -EINVAL;
1299
1300 natseq->offset_after =
1301 ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
1302
1303 return 0;
1304 }
1305
1306 static int
1307 ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
1308 const struct nlattr * const cda[])
1309 {
1310 int ret = 0;
1311 struct nf_conn_nat *nat = nfct_nat(ct);
1312
1313 if (!nat)
1314 return 0;
1315
1316 if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
1317 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
1318 cda[CTA_NAT_SEQ_ADJ_ORIG]);
1319 if (ret < 0)
1320 return ret;
1321
1322 ct->status |= IPS_SEQ_ADJUST;
1323 }
1324
1325 if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
1326 ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
1327 cda[CTA_NAT_SEQ_ADJ_REPLY]);
1328 if (ret < 0)
1329 return ret;
1330
1331 ct->status |= IPS_SEQ_ADJUST;
1332 }
1333
1334 return 0;
1335 }
1336 #endif
1337
/* Apply userspace-requested changes to an existing conntrack entry.
 * Called with nf_conntrack_lock held (taken in ctnetlink_new_conntrack).
 * NAT setup and master assignment are only valid at creation time and
 * are therefore rejected here.  Returns 0 or the first failing
 * sub-handler's negative errno.
 */
static int
ctnetlink_change_conntrack(struct nf_conn *ct,
			   const struct nlattr * const cda[])
{
	int err;

	/* only allow NAT changes and master assignation for new conntracks */
	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
		return -EOPNOTSUPP;

	if (cda[CTA_HELP]) {
		err = ctnetlink_change_helper(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TIMEOUT]) {
		err = ctnetlink_change_timeout(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			return err;
	}

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			return err;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			return err;
	}
#endif

	return 0;
}
1387
/* Build a new conntrack entry from netlink attributes and insert it
 * into the hash table.  Returns the new entry (refcounted) or an
 * ERR_PTR.  Error labels: err2 unwinds the RCU read lock, err1 only
 * frees the entry (used on paths where RCU is not held).
 *
 * NOTE(review): any nf_conntrack_alloc() failure is reported to the
 * caller as -ENOMEM, losing the original error code — confirm intended.
 */
static struct nf_conn *
ctnetlink_create_conntrack(struct net *net, u16 zone,
			   const struct nlattr * const cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;
	struct nf_conn_tstamp *tstamp;

	ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
	if (IS_ERR(ct))
		return ERR_PTR(-ENOMEM);

	/* a timeout is mandatory for userspace-created entries */
	if (!cda[CTA_TIMEOUT])
		goto err1;
	ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;

	rcu_read_lock();
 	if (cda[CTA_HELP]) {
		char *helpname = NULL;

 		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
 		if (err < 0)
			goto err2;

		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
						    nf_ct_protonum(ct));
		if (helper == NULL) {
			rcu_read_unlock();
#ifdef CONFIG_MODULES
			/* try to autoload the helper module; request_module()
			 * may sleep, so RCU was dropped above */
			if (request_module("nfct-helper-%s", helpname) < 0) {
				err = -EOPNOTSUPP;
				goto err1;
			}

			rcu_read_lock();
			helper = __nf_conntrack_helper_find(helpname,
							    nf_ct_l3num(ct),
							    nf_ct_protonum(ct));
			/* helper appeared after module load: ask caller to
			 * retry the request from scratch */
			if (helper) {
				err = -EAGAIN;
				goto err2;
			}
			rcu_read_unlock();
#endif
			err = -EOPNOTSUPP;
			goto err1;
		} else {
			struct nf_conn_help *help;

			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help == NULL) {
				err = -ENOMEM;
				goto err2;
			}

			/* not in hash table yet so not strictly necessary */
			RCU_INIT_POINTER(help->helper, helper);
		}
	} else {
		/* try an implicit helper assignation */
		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
		if (err < 0)
			goto err2;
	}

	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
		err = ctnetlink_change_nat(ct, cda);
		if (err < 0)
			goto err2;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
	/* we must add conntrack extensions before confirmation. */
	ct->status |= IPS_CONFIRMED;

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			goto err2;
	}

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			goto err2;
	}
#endif

	memset(&ct->proto, 0, sizeof(ct->proto));
	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			goto err2;
	}

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

	/* setup master conntrack: this is a confirmed expectation */
	if (cda[CTA_TUPLE_MASTER]) {
		struct nf_conntrack_tuple master;
		struct nf_conntrack_tuple_hash *master_h;
		struct nf_conn *master_ct;

		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
		if (err < 0)
			goto err2;

		master_h = nf_conntrack_find_get(net, zone, &master);
		if (master_h == NULL) {
			err = -ENOENT;
			goto err2;
		}
		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = master_ct;
	}
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp)
		tstamp->start = ktime_to_ns(ktime_get_real());

	err = nf_conntrack_hash_check_insert(ct);
	if (err < 0)
		goto err2;

	rcu_read_unlock();

	return ct;

err2:
	rcu_read_unlock();
err1:
	nf_conntrack_free(ct);
	return ERR_PTR(err);
}
1534
/* IPCTNL_MSG_CT_NEW handler: create a conntrack (NLM_F_CREATE) if no
 * matching entry exists, otherwise update it unless NLM_F_EXCL is set.
 * Emits a conntrack event on success in both cases.
 */
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			const struct nlmsghdr *nlh,
			const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct nf_conn *ct;
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
		if (err < 0)
			return err;
	}

	/* either tuple may identify the entry; original takes precedence */
	if (cda[CTA_TUPLE_ORIG])
		h = nf_conntrack_find_get(net, zone, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = nf_conntrack_find_get(net, zone, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			enum ip_conntrack_events events;

			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
							&rtuple, u3);
			if (IS_ERR(ct))
				return PTR_ERR(ct);

			err = 0;
			/* report RELATED for expected children, NEW otherwise */
			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
				events = IPCT_RELATED;
			else
				events = IPCT_NEW;

			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK) | events,
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		}

		return err;
	}
	/* implicit 'else' */

	err = -EEXIST;
	ct = nf_ct_tuplehash_to_ctrack(h);
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		/* the change path requires nf_conntrack_lock; the create
		 * path above takes its own locks internally */
		spin_lock_bh(&nf_conntrack_lock);
		err = ctnetlink_change_conntrack(ct, cda);
		spin_unlock_bh(&nf_conntrack_lock);
		if (err == 0) {
			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
						      (1 << IPCT_ASSURED) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK),
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
		}
	}

	nf_ct_put(ct);
	return err;
}
1622
1623 /***********************************************************************
1624 * EXPECT
1625 ***********************************************************************/
1626
1627 static inline int
1628 ctnetlink_exp_dump_tuple(struct sk_buff *skb,
1629 const struct nf_conntrack_tuple *tuple,
1630 enum ctattr_expect type)
1631 {
1632 struct nlattr *nest_parms;
1633
1634 nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
1635 if (!nest_parms)
1636 goto nla_put_failure;
1637 if (ctnetlink_dump_tuples(skb, tuple) < 0)
1638 goto nla_put_failure;
1639 nla_nest_end(skb, nest_parms);
1640
1641 return 0;
1642
1643 nla_put_failure:
1644 return -1;
1645 }
1646
/* Emit the expectation's tuple mask as a nested CTA_EXPECT_MASK.
 * The sparse nf_conntrack_tuple_mask is expanded into a full tuple
 * (everything else set to all-ones) so the generic tuple dumpers can
 * be reused.  Returns 0 on success, -1 on skb exhaustion.
 */
static inline int
ctnetlink_exp_dump_mask(struct sk_buff *skb,
			const struct nf_conntrack_tuple *tuple,
			const struct nf_conntrack_tuple_mask *mask)
{
	int ret;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple m;
	struct nlattr *nest_parms;

	/* build a full tuple from the sparse mask */
	memset(&m, 0xFF, sizeof(m));
	memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
	m.src.u.all = mask->src.u.all;
	m.dst.protonum = tuple->dst.protonum;

	nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	/* protocol handlers are RCU-protected */
	rcu_read_lock();
	l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
	ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
	if (ret >= 0) {
		l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
					       tuple->dst.protonum);
	ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
	}
	rcu_read_unlock();

	if (unlikely(ret < 0))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}
1687
/* Fill all CTA_EXPECT_* attributes describing one expectation into skb:
 * tuple, mask, master tuple, optional NAT info, timeout (in seconds,
 * clamped at 0), kernel id, flags, class, helper name and expectfn
 * name.  Returns 0 on success, -1 if the skb ran out of room.
 */
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct nf_conntrack_expect *exp)
{
	struct nf_conn *master = exp->master;
	long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
	struct nf_conn_help *help;
#ifdef CONFIG_NF_NAT_NEEDED
	struct nlattr *nest_parms;
	struct nf_conntrack_tuple nat_tuple = {};
#endif
	struct nf_ct_helper_expectfn *expfn;

	/* an already-expired timer would yield a negative remainder */
	if (timeout < 0)
		timeout = 0;

	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_tuple(skb,
				 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				 CTA_EXPECT_MASTER) < 0)
		goto nla_put_failure;

#ifdef CONFIG_NF_NAT_NEEDED
	/* only emit NAT info when the expectation actually carries it */
	if (exp->saved_ip || exp->saved_proto.all) {
		nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
		if (!nest_parms)
			goto nla_put_failure;

		if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
			goto nla_put_failure;

		/* reconstruct a tuple so the generic dumpers can be reused */
		nat_tuple.src.l3num = nf_ct_l3num(master);
		nat_tuple.src.u3.ip = exp->saved_ip;
		nat_tuple.dst.protonum = nf_ct_protonum(master);
		nat_tuple.src.u = exp->saved_proto;

		if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
						CTA_EXPECT_NAT_TUPLE) < 0)
	                goto nla_put_failure;
	        nla_nest_end(skb, nest_parms);
	}
#endif
	/* the kernel pointer doubles as the userspace-visible id */
	if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
	    nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
	    nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
	    nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
		goto nla_put_failure;
	help = nfct_help(master);
	if (help) {
		struct nf_conntrack_helper *helper;

		helper = rcu_dereference(help->helper);
		if (helper &&
		    nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
			goto nla_put_failure;
	}
	expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
	if (expfn != NULL &&
	    nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}
1757
1758 static int
1759 ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1760 int event, const struct nf_conntrack_expect *exp)
1761 {
1762 struct nlmsghdr *nlh;
1763 struct nfgenmsg *nfmsg;
1764 unsigned int flags = pid ? NLM_F_MULTI : 0;
1765
1766 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
1767 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
1768 if (nlh == NULL)
1769 goto nlmsg_failure;
1770
1771 nfmsg = nlmsg_data(nlh);
1772 nfmsg->nfgen_family = exp->tuple.src.l3num;
1773 nfmsg->version = NFNETLINK_V0;
1774 nfmsg->res_id = 0;
1775
1776 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
1777 goto nla_put_failure;
1778
1779 nlmsg_end(skb, nlh);
1780 return skb->len;
1781
1782 nlmsg_failure:
1783 nla_put_failure:
1784 nlmsg_cancel(skb, nlh);
1785 return -1;
1786 }
1787
1788 #ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Expectation event notifier callback: broadcast NEW/DESTROY events to
 * the matching nfnetlink multicast group.  Runs in atomic context
 * (GFP_ATOMIC allocations).  Always returns 0; delivery failures are
 * signalled to listeners via nfnetlink_set_err(-ENOBUFS).
 */
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
	struct nf_conntrack_expect *exp = item->exp;
	struct net *net = nf_ct_exp_net(exp);
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *skb;
	unsigned int type, group;
	int flags = 0;

	if (events & (1 << IPEXP_DESTROY)) {
		type = IPCTNL_MSG_EXP_DELETE;
		group = NFNLGRP_CONNTRACK_EXP_DESTROY;
	} else if (events & (1 << IPEXP_NEW)) {
		type = IPCTNL_MSG_EXP_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_EXP_NEW;
	} else
		return 0;

	/* skip the work entirely when nobody is listening */
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version	= NFNETLINK_V0;
	nfmsg->res_id	= 0;

	rcu_read_lock();
	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, net, item->pid, group, item->report, GFP_ATOMIC);
	return 0;

nla_put_failure:
	/* cascade: drop RCU, cancel the message, free the skb */
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	nfnetlink_set_err(net, 0, 0, -ENOBUFS);
	return 0;
}
1845 #endif
1846 static int ctnetlink_exp_done(struct netlink_callback *cb)
1847 {
1848 if (cb->args[1])
1849 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
1850 return 0;
1851 }
1852
/* Netlink dump callback: walk the expectation hash table and emit one
 * message per expectation.  Resume state lives in cb->args:
 *   args[0] = current hash bucket,
 *   args[1] = refcounted pointer to the last expectation that did not
 *             fit, so the next invocation restarts exactly there.
 * Optionally filters by the l3 family from the request's nfgenmsg.
 */
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct hlist_node *n;
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conntrack_expect *)cb->args[1];
	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
		hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
				     hnode) {
			if (l3proto && exp->tuple.src.l3num != l3proto)
				continue;
			/* resuming: skip entries until we reach 'last' */
			if (cb->args[1]) {
				if (exp != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_exp_fill_info(skb,
						    NETLINK_CB(cb->skb).pid,
						    cb->nlh->nlmsg_seq,
						    IPCTNL_MSG_EXP_NEW,
						    exp) < 0) {
				/* skb full: pin this entry for the next
				 * round, unless it is already dying */
				if (!atomic_inc_not_zero(&exp->use))
					continue;
				cb->args[1] = (unsigned long)exp;
				goto out;
			}
		}
		/* 'last' vanished from this bucket; rescan it from the top */
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_expect_put(last);

	return skb->len;
}
1898
/* Netlink attribute policy for all CTA_EXPECT_* messages (get/new/del). */
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
	[CTA_EXPECT_MASTER]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TUPLE]	= { .type = NLA_NESTED },
	[CTA_EXPECT_MASK]	= { .type = NLA_NESTED },
	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
	[CTA_EXPECT_HELP_NAME]	= { .type = NLA_NUL_STRING },
	[CTA_EXPECT_ZONE]	= { .type = NLA_U16 },
	[CTA_EXPECT_FLAGS]	= { .type = NLA_U32 },
	[CTA_EXPECT_CLASS]	= { .type = NLA_U32 },
	[CTA_EXPECT_NAT]	= { .type = NLA_NESTED },
	[CTA_EXPECT_FN]		= { .type = NLA_NUL_STRING },
};
1912
/* IPCTNL_MSG_EXP_GET handler: either start a table dump (NLM_F_DUMP)
 * or look up a single expectation by tuple/master tuple (optionally
 * cross-checked against CTA_EXPECT_ID) and unicast it back.
 */
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct sk_buff *skb2;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_exp_dump_table,
			.done = ctnetlink_exp_done,
		};
		return netlink_dump_start(ctnl, skb, nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_EXPECT_TUPLE])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
	else if (cda[CTA_EXPECT_MASTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	/* takes a reference; dropped on every exit path below */
	exp = nf_ct_expect_find_get(net, zone, &tuple);
	if (!exp)
		return -ENOENT;

	if (cda[CTA_EXPECT_ID]) {
		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
		/* the id is the kernel pointer, as emitted by the dumper */
		if (ntohl(id) != (u32)(unsigned long)exp) {
			nf_ct_expect_put(exp);
			return -ENOENT;
		}
	}

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL) {
		nf_ct_expect_put(exp);
		goto out;
	}

	rcu_read_lock();
	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
	rcu_read_unlock();
	nf_ct_expect_put(exp);
	if (err <= 0)
		goto free;

	err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
	if (err < 0)
		goto out;

	return 0;

free:
	kfree_skb(skb2);
out:
	/* this avoids a loop in nfnetlink. */
	return err == -EAGAIN ? -ENOBUFS : err;
}
1988
/* IPCTNL_MSG_EXP_DELETE handler.  Three modes, by attribute:
 *  - CTA_EXPECT_TUPLE:     delete one expectation (id-checked if given);
 *  - CTA_EXPECT_HELP_NAME: delete all expectations of a given helper;
 *  - neither:              flush the whole expectation table.
 */
static int
ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct hlist_node *n, *next;
	u_int8_t u3 = nfmsg->nfgen_family;
	unsigned int i;
	u16 zone;
	int err;

	if (cda[CTA_EXPECT_TUPLE]) {
		/* delete a single expect by tuple */
		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
		if (err < 0)
			return err;

		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
		if (err < 0)
			return err;

		/* bump usage count to 2 */
		exp = nf_ct_expect_find_get(net, zone, &tuple);
		if (!exp)
			return -ENOENT;

		if (cda[CTA_EXPECT_ID]) {
			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
			if (ntohl(id) != (u32)(unsigned long)exp) {
				nf_ct_expect_put(exp);
				return -ENOENT;
			}
		}

		/* after list removal, usage count == 1 */
		spin_lock_bh(&nf_conntrack_lock);
		/* del_timer() succeeding means we won the race against the
		 * timer expiry and own the unlink */
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).pid,
						   nlmsg_report(nlh));
			nf_ct_expect_put(exp);
		}
		spin_unlock_bh(&nf_conntrack_lock);
		/* have to put what we 'get' above.
		 * after this line usage count == 0 */
		nf_ct_expect_put(exp);
	} else if (cda[CTA_EXPECT_HELP_NAME]) {
		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
		struct nf_conn_help *m_help;

		/* delete all expectations for this helper */
		spin_lock_bh(&nf_conntrack_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &net->ct.expect_hash[i],
						  hnode) {
				m_help = nfct_help(exp->master);
				/* NOTE(review): m_help / m_help->helper are
				 * dereferenced unchecked; a master without a
				 * helper (e.g. userspace expectation) would
				 * oops here — verify against callers. */
				if (!strcmp(m_help->helper->name, name) &&
				    del_timer(&exp->timeout)) {
					nf_ct_unlink_expect_report(exp,
							NETLINK_CB(skb).pid,
							nlmsg_report(nlh));
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);
	} else {
		/* This basically means we have to flush everything*/
		spin_lock_bh(&nf_conntrack_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &net->ct.expect_hash[i],
						  hnode) {
				if (del_timer(&exp->timeout)) {
					nf_ct_unlink_expect_report(exp,
							NETLINK_CB(skb).pid,
							nlmsg_report(nlh));
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);
	}

	return 0;
}
2079 static int
2080 ctnetlink_change_expect(struct nf_conntrack_expect *x,
2081 const struct nlattr * const cda[])
2082 {
2083 if (cda[CTA_EXPECT_TIMEOUT]) {
2084 if (!del_timer(&x->timeout))
2085 return -ETIME;
2086
2087 x->timeout.expires = jiffies +
2088 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2089 add_timer(&x->timeout);
2090 }
2091 return 0;
2092 }
2093
/* Policy for the nested CTA_EXPECT_NAT attribute (direction + tuple). */
static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
};
2098
2099 static int
2100 ctnetlink_parse_expect_nat(const struct nlattr *attr,
2101 struct nf_conntrack_expect *exp,
2102 u_int8_t u3)
2103 {
2104 #ifdef CONFIG_NF_NAT_NEEDED
2105 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
2106 struct nf_conntrack_tuple nat_tuple = {};
2107 int err;
2108
2109 nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
2110
2111 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
2112 return -EINVAL;
2113
2114 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
2115 &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
2116 if (err < 0)
2117 return err;
2118
2119 exp->saved_ip = nat_tuple.src.u3.ip;
2120 exp->saved_proto = nat_tuple.src.u;
2121 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
2122
2123 return 0;
2124 #else
2125 return -EOPNOTSUPP;
2126 #endif
2127 }
2128
2129 static int
2130 ctnetlink_create_expect(struct net *net, u16 zone,
2131 const struct nlattr * const cda[],
2132 u_int8_t u3,
2133 u32 pid, int report)
2134 {
2135 struct nf_conntrack_tuple tuple, mask, master_tuple;
2136 struct nf_conntrack_tuple_hash *h = NULL;
2137 struct nf_conntrack_expect *exp;
2138 struct nf_conn *ct;
2139 struct nf_conn_help *help;
2140 struct nf_conntrack_helper *helper = NULL;
2141 u_int32_t class = 0;
2142 int err = 0;
2143
2144 /* caller guarantees that those three CTA_EXPECT_* exist */
2145 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
2146 if (err < 0)
2147 return err;
2148 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
2149 if (err < 0)
2150 return err;
2151 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
2152 if (err < 0)
2153 return err;
2154
2155 /* Look for master conntrack of this expectation */
2156 h = nf_conntrack_find_get(net, zone, &master_tuple);
2157 if (!h)
2158 return -ENOENT;
2159 ct = nf_ct_tuplehash_to_ctrack(h);
2160
2161 /* Look for helper of this expectation */
2162 if (cda[CTA_EXPECT_HELP_NAME]) {
2163 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
2164
2165 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
2166 nf_ct_protonum(ct));
2167 if (helper == NULL) {
2168 #ifdef CONFIG_MODULES
2169 if (request_module("nfct-helper-%s", helpname) < 0) {
2170 err = -EOPNOTSUPP;
2171 goto out;
2172 }
2173
2174 helper = __nf_conntrack_helper_find(helpname,
2175 nf_ct_l3num(ct),
2176 nf_ct_protonum(ct));
2177 if (helper) {
2178 err = -EAGAIN;
2179 goto out;
2180 }
2181 #endif
2182 err = -EOPNOTSUPP;
2183 goto out;
2184 }
2185 }
2186
2187 if (cda[CTA_EXPECT_CLASS] && helper) {
2188 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
2189 if (class > helper->expect_class_max) {
2190 err = -EINVAL;
2191 goto out;
2192 }
2193 }
2194 exp = nf_ct_expect_alloc(ct);
2195 if (!exp) {
2196 err = -ENOMEM;
2197 goto out;
2198 }
2199 help = nfct_help(ct);
2200 if (!help) {
2201 if (!cda[CTA_EXPECT_TIMEOUT]) {
2202 err = -EINVAL;
2203 goto out;
2204 }
2205 exp->timeout.expires =
2206 jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
2207
2208 exp->flags = NF_CT_EXPECT_USERSPACE;
2209 if (cda[CTA_EXPECT_FLAGS]) {
2210 exp->flags |=
2211 ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2212 }
2213 } else {
2214 if (cda[CTA_EXPECT_FLAGS]) {
2215 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
2216 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
2217 } else
2218 exp->flags = 0;
2219 }
2220 if (cda[CTA_EXPECT_FN]) {
2221 const char *name = nla_data(cda[CTA_EXPECT_FN]);
2222 struct nf_ct_helper_expectfn *expfn;
2223
2224 expfn = nf_ct_helper_expectfn_find_by_name(name);
2225 if (expfn == NULL) {
2226 err = -EINVAL;
2227 goto err_out;
2228 }
2229 exp->expectfn = expfn->expectfn;
2230 } else
2231 exp->expectfn = NULL;
2232
2233 exp->class = class;
2234 exp->master = ct;
2235 exp->helper = helper;
2236 memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
2237 memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
2238 exp->mask.src.u.all = mask.src.u.all;
2239
2240 if (cda[CTA_EXPECT_NAT]) {
2241 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
2242 exp, u3);
2243 if (err < 0)
2244 goto err_out;
2245 }
2246 err = nf_ct_expect_related_report(exp, pid, report);
2247 err_out:
2248 nf_ct_expect_put(exp);
2249 out:
2250 nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
2251 return err;
2252 }
2253
/* IPCTNL_MSG_EXP_NEW handler: create an expectation (NLM_F_CREATE) if
 * none matches the tuple, otherwise update it unless NLM_F_EXCL is set.
 * The lock is dropped before calling the create path, which may sleep
 * (module loading) and takes its own locks.
 */
static int
ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
		     const struct nlmsghdr *nlh,
		     const struct nlattr * const cda[])
{
	struct net *net = sock_net(ctnl);
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	u16 zone;
	int err;

	if (!cda[CTA_EXPECT_TUPLE]
	    || !cda[CTA_EXPECT_MASK]
	    || !cda[CTA_EXPECT_MASTER])
		return -EINVAL;

	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
	if (err < 0)
		return err;

	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
	if (err < 0)
		return err;

	spin_lock_bh(&nf_conntrack_lock);
	exp = __nf_ct_expect_find(net, zone, &tuple);

	if (!exp) {
		/* must drop the lock: create path may sleep */
		spin_unlock_bh(&nf_conntrack_lock);
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			err = ctnetlink_create_expect(net, zone, cda,
						      u3,
						      NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
		}
		return err;
	}

	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
		err = ctnetlink_change_expect(exp, cda);
	spin_unlock_bh(&nf_conntrack_lock);

	return err;
}
2302
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Event notifiers registered per-net in ctnetlink_net_init(): turn
 * conntrack/expectation state changes into nfnetlink multicasts. */
static struct nf_ct_event_notifier ctnl_notifier = {
	.fcn = ctnetlink_conntrack_event,
};

static struct nf_exp_event_notifier ctnl_notifier_exp = {
	.fcn = ctnetlink_expect_event,
};
#endif
2312
/* Message dispatch table for the conntrack subsystem (NFNL_SUBSYS_CTNETLINK).
 * GET_CTRZERO shares the GET handler; it zeroes counters as a side effect. */
static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET] 		= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_DELETE]  	= { .call = ctnetlink_del_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET_CTRZERO] 	= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
};
2327
/* Message dispatch table for the expectation subsystem
 * (NFNL_SUBSYS_CTNETLINK_EXP). */
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
};
2339
/* nfnetlink subsystem descriptor for conntrack messages. */
static const struct nfnetlink_subsystem ctnl_subsys = {
	.name				= "conntrack",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
	.cb_count			= IPCTNL_MSG_MAX,
	.cb				= ctnl_cb,
};
2346
/* nfnetlink subsystem descriptor for conntrack expectation messages. */
static const struct nfnetlink_subsystem ctnl_exp_subsys = {
	.name				= "conntrack_expect",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
	.cb_count			= IPCTNL_MSG_EXP_MAX,
	.cb				= ctnl_exp_cb,
};
2353
/* Allow autoloading via the legacy module name and the nfnetlink
 * subsystem IDs.
 */
MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
2357
/* Per-netns init: register the conntrack and expectation event
 * notifiers for this namespace.  When CONFIG_NF_CONNTRACK_EVENTS is
 * off this is a no-op that always succeeds.
 *
 * Returns 0 on success or the negative error from the failing
 * registration, after unwinding any earlier registration.
 */
static int __net_init ctnetlink_net_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	int ret;

	ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot register notifier.\n");
		goto err_out;
	}

	ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
	if (ret < 0) {
		pr_err("ctnetlink_init: cannot expect register notifier.\n");
		goto err_unreg_notifier;
	}
#endif
	return 0;

	/* Error path exists only when events are compiled in; otherwise
	 * the labels above are never reached.
	 */
#ifdef CONFIG_NF_CONNTRACK_EVENTS
err_unreg_notifier:
	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
err_out:
	return ret;
#endif
}
2384
/* Per-netns teardown: unregister the event notifiers in reverse
 * order of registration.  No-op when conntrack events are disabled.
 */
static void ctnetlink_net_exit(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
	nf_conntrack_unregister_notifier(net, &ctnl_notifier);
#endif
}
2392
2393 static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
2394 {
2395 struct net *net;
2396
2397 list_for_each_entry(net, net_exit_list, exit_list)
2398 ctnetlink_net_exit(net);
2399 }
2400
/* Pernet operations: batched exit avoids one synchronize_rcu() style
 * cost per namespace during mass namespace cleanup.
 */
static struct pernet_operations ctnetlink_net_ops = {
	.init		= ctnetlink_net_init,
	.exit_batch	= ctnetlink_net_exit_batch,
};
2405
2406 static int __init ctnetlink_init(void)
2407 {
2408 int ret;
2409
2410 pr_info("ctnetlink v%s: registering with nfnetlink.\n", version);
2411 ret = nfnetlink_subsys_register(&ctnl_subsys);
2412 if (ret < 0) {
2413 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
2414 goto err_out;
2415 }
2416
2417 ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
2418 if (ret < 0) {
2419 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
2420 goto err_unreg_subsys;
2421 }
2422
2423 if (register_pernet_subsys(&ctnetlink_net_ops)) {
2424 pr_err("ctnetlink_init: cannot register pernet operations\n");
2425 goto err_unreg_exp_subsys;
2426 }
2427
2428 return 0;
2429
2430 err_unreg_exp_subsys:
2431 nfnetlink_subsys_unregister(&ctnl_exp_subsys);
2432 err_unreg_subsys:
2433 nfnetlink_subsys_unregister(&ctnl_subsys);
2434 err_out:
2435 return ret;
2436 }
2437
/* Module exit: tear down in exact reverse order of ctnetlink_init()
 * registration.
 */
static void __exit ctnetlink_exit(void)
{
	pr_info("ctnetlink: unregistering from nfnetlink.\n");

	unregister_pernet_subsys(&ctnetlink_net_ops);
	nfnetlink_subsys_unregister(&ctnl_exp_subsys);
	nfnetlink_subsys_unregister(&ctnl_subsys);
}
2446
/* Module entry/exit hooks. */
module_init(ctnetlink_init);
module_exit(ctnetlink_exit);