/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
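
/*
 * Usage sketch (not part of this file): packets are steered here by an
 * iptables NFQUEUE rule, and a userspace daemon issues verdicts back over
 * nfnetlink.  A minimal, hedged example using the separate
 * libnetfilter_queue library (its API is assumed here, not defined by this
 * kernel module):
 *
 *	// # iptables -A FORWARD -j NFQUEUE --queue-num 0
 *
 *	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *		      struct nfq_data *nfa, void *data)
 *	{
 *		struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
 *		u_int32_t id = ph ? ntohl(ph->packet_id) : 0;
 *		return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *	}
 *
 *	struct nfq_handle *h = nfq_open();
 *	struct nfq_q_handle *qh = nfq_create_queue(h, 0, &cb, NULL);
 *	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);
 *	char buf[4096];
 *	int rv;
 *	while ((rv = recv(nfq_fd(h), buf, sizeof(buf), 0)) >= 0)
 *		nfq_handle_packet(h, buf, rv);
 */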

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024
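
/* One instance exists per bound queue number.  Instances live in the
 * instance_table hash, are looked up under RCU on the packet path and
 * mutated under instances_lock; per-instance state is guarded by ->lock. */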

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	unsigned int id_sequence;		/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}
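
/* Example of the fold above: queue_num 0x0102 gives (0x01 | 0x0102) % 16 =
 * 0x0103 % 16 = 3, so both bytes of the queue number influence the bucket. */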

static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst = NULL;
	unsigned int h;

	spin_lock(&instances_lock);
	if (instance_lookup(queue_num))
		goto out_unlock;

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);
	INIT_RCU_HEAD(&inst->rcu);

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

	spin_unlock(&instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&instances_lock);
	return NULL;
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
	spin_lock(&instances_lock);
	__instance_destroy(inst);
	spin_unlock(&instances_lock);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue->queue_total--;
	}

	spin_unlock_bh(&queue->lock);

	return entry;
}
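
/* Walk a queue and NF_DROP-reinject every entry the comparison function
 * matches (or every entry when cmpfn is NULL); used when a device goes down
 * and when an instance is torn down. */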

static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;

	size =    NLMSG_ALIGN(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entry->outdev;

	spin_lock_bh(&queue->lock);

	switch ((enum nfqnl_config_mode)queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    skb_checksum_help(entskb)) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += nla_total_size(data_len);
		break;
	}

	entry->id = queue->id_sequence++;

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entry->hook;

	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entry->indev;
	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physindev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
					     htonl(entskb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					     htonl(entskb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (entskb->mark)
		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

	if (indev && entskb->dev) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nla_put_failure:
	if (skb)
		kfree_skb(skb);
	printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	int err;

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(queuenum);
	if (!queue)
		goto err_out;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		goto err_out;

	nskb = nfqnl_build_packet_message(queue, entry);
	if (nskb == NULL)
		goto err_out;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue->lock);

err_out:
	return -1;
}
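
/* Note the two drop counters above: queue_dropped counts packets dropped
 * because the kernel-side queue was full, while queue_user_dropped counts
 * packets the netlink unicast could not deliver to the peer.  Both appear
 * as columns in /proc/net/netfilter/nfnetlink_queue (see seq_show below). */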

static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
	int diff;
	int err;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			err = pskb_expand_head(e->skb, 0,
					       diff - skb_tailroom(e->skb),
					       GFP_ATOMIC);
			if (err) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return err;
			}
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}
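
/* Example: if userspace returns a 1400-byte payload for a packet that was
 * queued at 1500 bytes, diff is -100 and the skb is trimmed; a 1600-byte
 * reply gives diff = +100 and grows the skb (expanding the head first when
 * tailroom is short) before the new payload is copied in. */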

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nlattr which has 16bit nla_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry_rcu(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		spin_lock(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		spin_unlock(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};

static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	int err;

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (!queue) {
		err = -ENODEV;
		goto err_out_unlock;
	}

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_unlock;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_unlock;
	}
	rcu_read_unlock();

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;

err_out_unlock:
	rcu_read_unlock();
	return err;
}
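
/* From userspace, the verdict message handled above is what e.g.
 * libnetfilter_queue's nfq_set_verdict(qh, id, NF_ACCEPT, data_len, buf)
 * emits (library API assumed, not defined here): a non-zero data_len
 * attaches an NFQA_PAYLOAD attribute and triggers nfqnl_mangle(), and an
 * NFQA_MARK attribute lets the peer rewrite skb->mark before reinjection. */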

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nlattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Commands without queue context - might sleep */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND:
			ret = nf_register_queue_handler(ntohs(cmd->pf),
							&nfqh);
			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
			ret = nf_unregister_queue_handler(ntohs(cmd->pf),
							  &nfqh);
			break;
		default:
			break;
		}

		if (ret < 0)
			return ret;
	}

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue) {
				ret = -EINVAL;
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}
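
/* A peer typically configures in this order: NFQNL_CFG_CMD_PF_BIND for the
 * protocol family, NFQNL_CFG_CMD_BIND for its queue number, then an
 * NFQA_CFG_PARAMS attribute to pick the copy mode and range; this mirrors
 * the nfq_bind_pf() / nfq_create_queue() / nfq_set_mode() sequence that
 * libnetfilter_queue performs on the userspace side. */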

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
	spin_unlock(&instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence, 1);
}
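
/* The columns printed above, in order: queue number, peer pid, packets
 * currently queued, copy mode, copy range, kernel-side drops, drops on the
 * netlink path to userspace, next packet id, and a constant 1 (apparently a
 * leftover from when instances were reference-counted). */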

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_nfqueue;
#endif

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
					 proc_net_netfilter);
	if (!proc_nfqueue) {
		status = -ENOMEM;
		goto cleanup_subsys;
	}
	proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);