net/ipv4/inet_fragment.c

/*
 * inet fragments management
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *	   0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

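/*
 * Timer callback: re-seed f->rnd and move every queue to the bucket
 * matching its new hash value.  Holding the table write lock excludes
 * all readers, so the per-bucket chain locks can be skipped here.
 */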
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

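/*
 * One-time setup for a protocol's fragment table: initialise the hash
 * buckets and their chain locks, seed the hash, and arm the periodic
 * secret_timer that drives inet_frag_secret_rebuild().
 */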
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
			(jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

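/* Per-namespace counterpart: reset the counters and set up the LRU list
 * and its lock.
 */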
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

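/*
 * Namespace teardown: drop the low threshold to zero and force the
 * evictor to flush every remaining queue, then release the per-cpu
 * memory counter.
 */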
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

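/*
 * Remove a queue from its hash chain and from the per-namespace LRU
 * list.  The read lock keeps the hash table stable against a secret
 * rebuild while the bucket's chain lock protects the actual unlink.
 */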
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = f->hashfn(fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
	inet_frag_lru_del(fq);
}

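/*
 * Mark a queue as complete and drop the references held by its pending
 * timer and by the hash table, leaving only the callers' references.
 */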
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
				  struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

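/*
 * Final teardown of a dead queue: free every queued fragment, account
 * the released memory against the namespace limit (and the optional
 * *work budget), then run the protocol destructor and free the queue.
 */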
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
		       int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

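/*
 * Reclaim memory by killing queues from the head of the LRU list until
 * usage drops below the low threshold (or, with force, until the list
 * is empty).  Returns the number of queues evicted.
 */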
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0 || force) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);

		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

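/*
 * Insert a freshly allocated queue into the hash table.  On SMP another
 * CPU may have created a matching queue while we were allocating; if so,
 * the duplicate is completed and dropped and the existing queue is
 * returned instead.
 */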
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we stayed w/o the lock other CPU could update
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately the qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could be created on other cpu, while we
	 * released the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);
	inet_frag_lru_add(nf, qp);
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	return qp;
}

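/*
 * Allocate and initialise a new queue; f->qsize covers the protocol's
 * private part and f->constructor fills it in.  The initial reference
 * count of 1 belongs to the caller.
 */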
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	INIT_LIST_HEAD(&q->lru_list);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

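/*
 * Look up a queue by key in the given hash bucket, creating one if no
 * match is found.  Called with f->lock read-held; the lock is released
 * before returning.  If the chain has grown past INETFRAGS_MAXDEPTH the
 * lookup gives up and returns ERR_PTR(-ENOBUFS) instead of creating a
 * new queue.
 */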
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);