/*
 * This is a module which is used for logging packets to userspace via
 * nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ipt_ULOG.c:
 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_log.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nfnetlink_log.h>

#include <linux/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFULNL_NLBUFSIZ_DEFAULT	NLMSG_GOODSIZE
#define NFULNL_TIMEOUT_DEFAULT	100	/* every second */
#define NFULNL_QTHRESH_DEFAULT	100	/* 100 packets */
#define NFULNL_COPY_RANGE_MAX	0xFFFF	/* max packet size is limited by 16-bit struct nfattr nfa_len field */

#define PRINTR(x, args...)	do { if (net_ratelimit()) \
				     printk(x, ## args); } while (0)
struct nfulnl_instance {
	struct hlist_node hlist;	/* global list of instances */
	spinlock_t lock;
	atomic_t use;			/* use count */

	unsigned int qlen;		/* number of nlmsgs in skb */
	struct sk_buff *skb;		/* pre-allocated skb */
	struct timer_list timer;
	struct user_namespace *peer_user_ns;	/* User namespace of the peer process */
	int peer_portid;		/* PORTID of the peer process */

	/* configurable parameters */
	unsigned int flushtimeout;	/* timeout until queue flush */
	unsigned int nlbufsiz;		/* netlink buffer allocation size */
	unsigned int qthreshold;	/* threshold of the queue */
	u_int32_t copy_range;
	u_int32_t seq;			/* instance-local sequential counter */
	u_int16_t group_num;		/* number of this queue */
	u_int16_t flags;
	u_int8_t copy_mode;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(instances_lock);
static atomic_t global_seq;

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];
static unsigned int hash_init;

static inline u_int8_t instance_hashfn(u_int16_t group_num)
{
	return ((group_num & 0xff) % INSTANCE_BUCKETS);
}
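
/* Worked example: group 0x0234 hashes to bucket (0x34 & 0xff) % 16 = 4.
 * Only the low byte of the group number contributes, so groups that
 * differ only in the high byte land in the same bucket. */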

static struct nfulnl_instance *
__instance_lookup(u_int16_t group_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfulnl_instance *inst;

	head = &instance_table[instance_hashfn(group_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->group_num == group_num)
			return inst;
	}
	return NULL;
}

static inline void
instance_get(struct nfulnl_instance *inst)
{
	atomic_inc(&inst->use);
}

static struct nfulnl_instance *
instance_lookup_get(u_int16_t group_num)
{
	struct nfulnl_instance *inst;

	rcu_read_lock_bh();
	inst = __instance_lookup(group_num);
	if (inst && !atomic_inc_not_zero(&inst->use))
		inst = NULL;
	rcu_read_unlock_bh();

	return inst;
}

static void nfulnl_instance_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct nfulnl_instance, rcu));
	module_put(THIS_MODULE);
}

static void
instance_put(struct nfulnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use))
		call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
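
/* Lifetime sketch: lookups take a reference with atomic_inc_not_zero()
 * under rcu_read_lock_bh(), so they can never resurrect an instance
 * whose last reference is already gone; the final kfree() is deferred
 * through call_rcu_bh(), so BH-disabled readers never touch freed
 * memory. */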

static void nfulnl_timer(unsigned long data);

static struct nfulnl_instance *
instance_create(u_int16_t group_num, int portid, struct user_namespace *user_ns)
{
	struct nfulnl_instance *inst;
	int err;

	spin_lock_bh(&instances_lock);
	if (__instance_lookup(group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->peer_user_ns = user_ns;
	inst->peer_portid = portid;
	inst->group_num = group_num;

	inst->qthreshold	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode		= NFULNL_COPY_PACKET;
	inst->copy_range	= NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
			   &instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&instances_lock);
	return ERR_PTR(err);
}

static void __nfulnl_flush(struct nfulnl_instance *inst);

/* called with BH disabled */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del_rcu(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock(&inst->lock);

	/* lockless readers won't be able to use us */
	inst->copy_mode = NFULNL_COPY_DISABLED;

	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}

static inline void
instance_destroy(struct nfulnl_instance *inst)
{
	spin_lock_bh(&instances_lock);
	__instance_destroy(inst);
	spin_unlock_bh(&instances_lock);
}

static int
nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
		unsigned int range)
{
	int status = 0;

	spin_lock_bh(&inst->lock);

	switch (mode) {
	case NFULNL_COPY_NONE:
	case NFULNL_COPY_META:
		inst->copy_mode = mode;
		inst->copy_range = 0;
		break;

	case NFULNL_COPY_PACKET:
		inst->copy_mode = mode;
		inst->copy_range = min_t(unsigned int,
					 range, NFULNL_COPY_RANGE_MAX);
		break;

	default:
		status = -EINVAL;
		break;
	}

	spin_unlock_bh(&inst->lock);

	return status;
}

static int
nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
{
	int status;

	spin_lock_bh(&inst->lock);
	if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
		status = -ERANGE;
	else if (nlbufsiz > 131072)
		status = -ERANGE;
	else {
		inst->nlbufsiz = nlbufsiz;
		status = 0;
	}
	spin_unlock_bh(&inst->lock);

	return status;
}
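
/* The accepted buffer size lies in [NFULNL_NLBUFSIZ_DEFAULT, 131072]:
 * below the default, batching buys nothing; the 128k ceiling matches
 * the slab-imposed allocation limit noted in nfulnl_alloc_skb(). */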

static int
nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
{
	spin_lock_bh(&inst->lock);
	inst->flushtimeout = timeout;
	spin_unlock_bh(&inst->lock);

	return 0;
}

static int
nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
{
	spin_lock_bh(&inst->lock);
	inst->qthreshold = qthresh;
	spin_unlock_bh(&inst->lock);

	return 0;
}

static int
nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
{
	spin_lock_bh(&inst->lock);
	inst->flags = flags;
	spin_unlock_bh(&inst->lock);

	return 0;
}

static struct sk_buff *
nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
{
	struct sk_buff *skb;
	unsigned int n;

	/* alloc skb which should be big enough for a whole multipart
	 * message.  WARNING: has to be <= 128k due to slab restrictions */

	n = max(inst_size, pkt_size);
	skb = alloc_skb(n, GFP_ATOMIC);
	if (!skb) {
		if (n > pkt_size) {
			/* try to allocate only as much as we need for current
			 * packet */

			skb = alloc_skb(pkt_size, GFP_ATOMIC);
			if (!skb)
				pr_err("nfnetlink_log: can't even alloc %u bytes\n",
				       pkt_size);
		}
	}

	return skb;
}
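
/* Two-stage allocation: first try a buffer large enough for a whole
 * batch (nlbufsiz); under memory pressure, fall back to just the
 * current packet so at least this one message is not dropped. */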

static int
__nfulnl_send(struct nfulnl_instance *inst)
{
	int status = -1;

	if (inst->qlen > 1) {
		struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
						 NLMSG_DONE,
						 sizeof(struct nfgenmsg),
						 0);
		if (!nlh)
			goto out;
	}
	status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_portid,
				   MSG_DONTWAIT);

	inst->qlen = 0;
	inst->skb = NULL;
out:
	return status;
}
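
/* If more than one message was batched into the skb, the NLMSG_DONE
 * header appended above lets userspace detect the end of the multipart
 * stream before the buffer is unicast to the peer. */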

static void
__nfulnl_flush(struct nfulnl_instance *inst)
{
	/* timer holds a reference */
	if (del_timer(&inst->timer))
		instance_put(inst);
	if (inst->skb)
		__nfulnl_send(inst);
}

static void
nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}

/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we
			 * need to look for the bridge group (when called
			 * from netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is the bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(skb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is the physical output device, we
			 * need to look for the bridge group (when called
			 * from netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: outdev is the bridge group, we need to
			 * look for the physical device (when called from
			 * ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(skb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk->sk_state != TCP_TIME_WAIT) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&global_seq))))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			return -1;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
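
/* Resulting message layout, as built above: struct nlmsghdr, then a
 * struct nfgenmsg carrying family and group number, then NFULA_*
 * attributes (packet header, prefix, ifindexes, mark, hardware header,
 * timestamp, uid/gid, sequence numbers) and, in packet copy mode, an
 * NFULA_PAYLOAD attribute of up to copy_range bytes. */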

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static struct nf_loginfo default_loginfo = {
	.type =		NF_LOG_TYPE_ULOG,
	.u = {
		.ulog = {
			.copy_len	= 0xffff,
			.group		= 0,
			.qthreshold	= 1,
		},
	},
};

/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));

	if (in && skb_mac_header_was_set(skb)) {
		size +=   nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;


	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += nla_total_size(data_len);
		break;

	case NFULNL_COPY_DISABLED:
	default:
		goto unlock_and_release;
	}

	if (inst->skb &&
	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
EXPORT_SYMBOL_GPL(nfulnl_log_packet);
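
/* Timing note: flushtimeout is in hundredths of a second (the timer is
 * armed with jiffies + flushtimeout*HZ/100), so the default of 100
 * flushes at most one second after the first queued packet.  A
 * per-rule qthreshold can only lower, never raise, the instance
 * threshold. */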

static int
nfulnl_rcv_nl_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfulnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((net_eq(n->net, &init_net)) &&
				    (n->portid == inst->peer_portid))
					__instance_destroy(inst);
			}
		}
		spin_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}
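
/* NETLINK_URELEASE fires when a netlink socket is released, so the
 * teardown above guarantees no log group keeps a stale portid after
 * its userspace listener exits. */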

static struct notifier_block nfulnl_rtnl_notifier = {
	.notifier_call	= nfulnl_rcv_nl_event,
};

static int
nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.logfn	= &nfulnl_log_packet,
	.me	= THIS_MODULE,
};

static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
	[NFULA_CFG_CMD]		= { .len = sizeof(struct nfulnl_msg_config_cmd) },
	[NFULA_CFG_MODE]	= { .len = sizeof(struct nfulnl_msg_config_mode) },
	[NFULA_CFG_TIMEOUT]	= { .type = NLA_U32 },
	[NFULA_CFG_QTHRESH]	= { .type = NLA_U32 },
	[NFULA_CFG_NLBUFSIZ]	= { .type = NLA_U32 },
	[NFULA_CFG_FLAGS]	= { .type = NLA_U16 },
};

static int
nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfula[])
{
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int16_t group_num = ntohs(nfmsg->res_id);
	struct nfulnl_instance *inst;
	struct nfulnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfula[NFULA_CFG_CMD]) {
		u_int8_t pf = nfmsg->nfgen_family;
		cmd = nla_data(nfula[NFULA_CFG_CMD]);

		/* Commands without queue context */
		switch (cmd->command) {
		case NFULNL_CFG_CMD_PF_BIND:
			return nf_log_bind_pf(pf, &nfulnl_logger);
		case NFULNL_CFG_CMD_PF_UNBIND:
			nf_log_unbind_pf(pf);
			return 0;
		}
	}

	inst = instance_lookup_get(group_num);
	if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto out_put;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFULNL_CFG_CMD_BIND:
			if (inst) {
				ret = -EBUSY;
				goto out_put;
			}

			inst = instance_create(group_num,
					       NETLINK_CB(skb).portid,
					       sk_user_ns(NETLINK_CB(skb).ssk));
			if (IS_ERR(inst)) {
				ret = PTR_ERR(inst);
				goto out;
			}
			break;
		case NFULNL_CFG_CMD_UNBIND:
			if (!inst) {
				ret = -ENODEV;
				goto out;
			}

			instance_destroy(inst);
			goto out_put;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfula[NFULA_CFG_MODE]) {
		struct nfulnl_msg_config_mode *params;
		params = nla_data(nfula[NFULA_CFG_MODE]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_mode(inst, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfula[NFULA_CFG_TIMEOUT]) {
		__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_timeout(inst, ntohl(timeout));
	}

	if (nfula[NFULA_CFG_NLBUFSIZ]) {
		__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
	}

	if (nfula[NFULA_CFG_QTHRESH]) {
		__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_qthresh(inst, ntohl(qthresh));
	}

	if (nfula[NFULA_CFG_FLAGS]) {
		__be16 flags = nla_get_be16(nfula[NFULA_CFG_FLAGS]);

		if (!inst) {
			ret = -ENODEV;
			goto out;
		}
		nfulnl_set_flags(inst, ntohs(flags));
	}

out_put:
	instance_put(inst);
out:
	return ret;
}
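
/* Illustrative userspace sketch (not part of this file), assuming the
 * conventional libnetfilter_log API; group 0 and AF_INET are arbitrary
 * example values:
 *
 *	struct nflog_handle *h = nflog_open();
 *	nflog_bind_pf(h, AF_INET);              // NFULNL_CFG_CMD_PF_BIND
 *	struct nflog_g_handle *gh = nflog_bind_group(h, 0);
 *	                                        // NFULNL_CFG_CMD_BIND
 *	nflog_set_mode(gh, NFULNL_COPY_PACKET, 0xffff);
 *	                                        // NFULA_CFG_MODE
 */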

static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
	[NFULNL_MSG_PACKET]	= { .call = nfulnl_recv_unsupp,
				    .attr_count = NFULA_MAX, },
	[NFULNL_MSG_CONFIG]	= { .call = nfulnl_recv_config,
				    .attr_count = NFULA_CFG_MAX,
				    .policy = nfula_cfg_policy },
};

static const struct nfnetlink_subsystem nfulnl_subsys = {
	.name		= "log",
	.subsys_id	= NFNL_SUBSYS_ULOG,
	.cb_count	= NFULNL_MSG_MAX,
	.cb		= nfulnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct iter_state *st)
{
	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
	}
	return NULL;
}

static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
{
	h = rcu_dereference_bh(hlist_next_rcu(h));
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
	}
	return h;
}

static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(st);

	if (head)
		while (pos && (head = get_next(st, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu_bh)
{
	rcu_read_lock_bh();
	return get_idx(seq->private, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s->private, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfulnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
			  inst->group_num,
			  inst->peer_portid, inst->qlen,
			  inst->copy_mode, inst->copy_range,
			  inst->flushtimeout, atomic_read(&inst->use));
}
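
/* Sample line from /proc/net/netfilter/nfnetlink_log, matching the
 * format string above (values are illustrative):
 *
 *	    1   4201     0 2 65535    100  2
 *
 * i.e. group 1, peer portid 4201, 0 queued msgs, copy_mode 2
 * (NFULNL_COPY_PACKET), copy_range 65535, flushtimeout 100 (one
 * second), use count 2. */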

static const struct seq_operations nful_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nful_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nful_seq_ops,
				sizeof(struct iter_state));
}

static const struct file_operations nful_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nful_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

static int __init nfnetlink_log_init(void)
{
	int i, status = -ENOMEM;

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	/* it's not really all that important to have a random value, so
	 * we can do this from the init function, even if there hasn't
	 * been that much entropy yet */
	get_random_bytes(&hash_init, sizeof(hash_init));

	netlink_register_notifier(&nfulnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfulnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "log: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
	if (status < 0) {
		printk(KERN_ERR "log: failed to register logger\n");
		goto cleanup_subsys;
	}

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_log", 0440,
			 proc_net_netfilter, &nful_file_ops)) {
		status = -ENOMEM;
		goto cleanup_logger;
	}
#endif
	return status;

#ifdef CONFIG_PROC_FS
cleanup_logger:
	nf_log_unregister(&nfulnl_logger);
#endif
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfulnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_log_fini(void)
{
	nf_log_unregister(&nfulnl_logger);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_log", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfulnl_subsys);
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);

module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);