/* net/netfilter/nf_queue.c */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long-term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* return -EBUSY when somebody else is registered, return -EEXIST if the
 * same handler is registered, return 0 in case of success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	int ret;
	const struct nf_queue_handler *old;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	old = rcu_dereference_protected(queue_handler[pf],
					lockdep_is_held(&queue_handler_mutex));
	if (old == qh)
		ret = -EEXIST;
	else if (old)
		ret = -EBUSY;
	else {
		rcu_assign_pointer(queue_handler[pf], qh);
		ret = 0;
	}
	mutex_unlock(&queue_handler_mutex);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
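
/*
 * Usage sketch (hypothetical, compiled out; not part of this file): a
 * handler module supplies a name and an outfn, and pairs registration
 * with unregistration.  All identifiers below are invented for
 * illustration -- the real in-tree user of this API is nfnetlink_queue.
 */
#if 0
static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* An outfn that returns 0 owns the entry: every packet accepted
	 * here must eventually come back through nf_reinject(). */
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

static const struct nf_queue_handler example_qh = {
	.name	= "example",
	.outfn	= example_outfn,
};

static int __init example_init(void)
{
	/* -EBUSY if somebody else holds this family, -EEXIST if this
	 * handler is already registered. */
	return nf_register_queue_handler(NFPROTO_IPV4, &example_qh);
}

static void __exit example_exit(void)
{
	/* The queue must be flushed before unregistering. */
	nf_unregister_queue_handler(NFPROTO_IPV4, &example_qh);
}
#endif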

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	const struct nf_queue_handler *old;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	old = rcu_dereference_protected(queue_handler[pf],
					lockdep_is_held(&queue_handler_mutex));
	if (old && old != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}

	rcu_assign_pointer(queue_handler[pf], NULL);
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
	u_int8_t pf;

	mutex_lock(&queue_handler_mutex);
	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
		if (rcu_dereference_protected(
				queue_handler[pf],
				lockdep_is_held(&queue_handler_mutex)
				) == qh)
			rcu_assign_pointer(queue_handler[pf], NULL);
	}
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev;
	struct net_device *physoutdev;
#endif
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler[pf]);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= list_entry(elem, struct nf_hook_ops, list),
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	/* If it's going away, ignore hook. */
	if (!try_module_get(entry->elem->owner)) {
		status = -ECANCELED;
		goto err_unlock;
	}
	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}
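
/*
 * The main error codes of __nf_queue(), summarizing what the callers
 * below and in nf_hook_slow() rely on:
 *
 *	-ESRCH		no handler registered for this pf; the packet is
 *			dropped unless NF_VERDICT_FLAG_QUEUE_BYPASS was set
 *	-ECANCELED	the hook's module is being unloaded; callers skip
 *			this hook and continue traversal
 *	-ENOMEM		the queue entry could not be allocated
 */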

int nf_queue(struct sk_buff *skb,
	     struct list_head *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;
	int err;
	unsigned int queued;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	switch (pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to
	 * mean 'ignore this hook'.
	 */
	if (IS_ERR(segs))
		return -EINVAL;

	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (err == 0)
			err = __nf_queue(segs, elem, pf, hook, indev,
					 outdev, okfn, queuenum);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	/* also free orig skb if only some segments were queued */
	if (unlikely(err && queued))
		err = 0;
	if (err == 0)
		kfree_skb(skb);
	return err;
}

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct list_head *elem = &entry->elem->list;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = __nf_queue(skb, elem, entry->pf, entry->hook,
				 entry->indev, entry->outdev, entry->okfn,
				 verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
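
/*
 * Sketch (hypothetical hook function, compiled out; for illustration
 * only): how a hook requests queueing to a particular queue.
 * NF_QUEUE_NR() encodes the queue number in the upper verdict bits,
 * which nf_reinject() above extracts with "verdict >> NF_VERDICT_QBITS".
 */
#if 0
static unsigned int example_hook(unsigned int hooknum, struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* Queue to queue 3; if no handler is listening, continue hook
	 * traversal (the -ESRCH bypass above) instead of dropping. */
	return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}
#endif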

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{

}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(queue_handler[*pos]);
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	rcu_read_unlock();

	return ret;
}

static const struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */
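
/*
 * Reading /proc/net/netfilter/nf_queue yields one line per protocol
 * family.  Hypothetical output with a handler bound to NFPROTO_IPV4
 * (pf 2) and named "nf_queue" (the name nfnetlink_queue uses), all
 * other families unclaimed:
 *
 *	 0 NONE
 *	 1 NONE
 *	 2 nf_queue
 *	 3 NONE
 *	...
 */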

int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("nf_queue", S_IRUGO,
			 proc_net_netfilter, &nfqueue_file_ops))
		return -1;
#endif
	return 0;
}