net: mark read-only arrays as const
net/ipv4/fib_trie.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
9 *
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 *
15 * This work is based on the LPC-trie which is originally described in:
16 *
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.nada.kth.se/~snilsson/public/papers/dyntrie2/
20 *
21 *
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 *
25 *
26 * Code from fib_hash has been reused which includes the following header:
27 *
28 *
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
32 *
33 * IPv4 FIB: lookup engine and maintenance routines.
34 *
35 *
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
37 *
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
42 *
43 * Substantial contributions to this work comes from:
44 *
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
49 */
50
51 #define VERSION "0.408"
52
53 #include <asm/uaccess.h>
54 #include <asm/system.h>
55 #include <linux/bitops.h>
56 #include <linux/types.h>
57 #include <linux/kernel.h>
58 #include <linux/mm.h>
59 #include <linux/string.h>
60 #include <linux/socket.h>
61 #include <linux/sockios.h>
62 #include <linux/errno.h>
63 #include <linux/in.h>
64 #include <linux/inet.h>
65 #include <linux/inetdevice.h>
66 #include <linux/netdevice.h>
67 #include <linux/if_arp.h>
68 #include <linux/proc_fs.h>
69 #include <linux/rcupdate.h>
70 #include <linux/skbuff.h>
71 #include <linux/netlink.h>
72 #include <linux/init.h>
73 #include <linux/list.h>
74 #include <net/net_namespace.h>
75 #include <net/ip.h>
76 #include <net/protocol.h>
77 #include <net/route.h>
78 #include <net/tcp.h>
79 #include <net/sock.h>
80 #include <net/ip_fib.h>
81 #include "fib_lookup.h"
82
83 #define MAX_STAT_DEPTH 32
84
85 #define KEYLENGTH (8*sizeof(t_key))
86
87 typedef unsigned int t_key;
88
89 #define T_TNODE 0
90 #define T_LEAF 1
91 #define NODE_TYPE_MASK 0x1UL
92 #define NODE_TYPE(node) ((node)->parent & NODE_TYPE_MASK)
93
94 #define IS_TNODE(n) (!((n)->parent & T_LEAF))
95 #define IS_LEAF(n) ((n)->parent & T_LEAF)
96
97 struct node {
98 unsigned long parent;
99 t_key key;
100 };
101
102 struct leaf {
103 unsigned long parent;
104 t_key key;
105 struct hlist_head list;
106 struct rcu_head rcu;
107 };
108
109 struct leaf_info {
110 struct hlist_node hlist;
111 struct rcu_head rcu;
112 int plen;
113 struct list_head falh;
114 };
115
116 struct tnode {
117 unsigned long parent;
118 t_key key;
119 unsigned char pos; /* log2(KEYLENGTH) bits needed */
120 unsigned char bits; /* log2(KEYLENGTH) bits needed */
121 unsigned int full_children; /* KEYLENGTH bits needed */
122 unsigned int empty_children; /* KEYLENGTH bits needed */
123 union {
124 struct rcu_head rcu;
125 struct work_struct work;
126 struct tnode *tnode_free;
127 };
128 struct node *child[0];
129 };
130
131 #ifdef CONFIG_IP_FIB_TRIE_STATS
132 struct trie_use_stats {
133 unsigned int gets;
134 unsigned int backtrack;
135 unsigned int semantic_match_passed;
136 unsigned int semantic_match_miss;
137 unsigned int null_node_hit;
138 unsigned int resize_node_skipped;
139 };
140 #endif
141
142 struct trie_stat {
143 unsigned int totdepth;
144 unsigned int maxdepth;
145 unsigned int tnodes;
146 unsigned int leaves;
147 unsigned int nullpointers;
148 unsigned int prefixes;
149 unsigned int nodesizes[MAX_STAT_DEPTH];
150 };
151
152 struct trie {
153 struct node *trie;
154 #ifdef CONFIG_IP_FIB_TRIE_STATS
155 struct trie_use_stats stats;
156 #endif
157 };
158
159 static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
160 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
161 int wasfull);
162 static struct node *resize(struct trie *t, struct tnode *tn);
163 static struct tnode *inflate(struct trie *t, struct tnode *tn);
164 static struct tnode *halve(struct trie *t, struct tnode *tn);
165 /* tnodes to free after resize(); protected by RTNL */
166 static struct tnode *tnode_free_head;
167 static size_t tnode_free_size;
168
169 /*
170 * synchronize_rcu after call_rcu for that many pages; it should be especially
171 * useful before resizing the root node with PREEMPT_NONE configs; the value was
172 * obtained experimentally, aiming to avoid visible slowdown.
173 */
174 static const int sync_pages = 128;
175
176 static struct kmem_cache *fn_alias_kmem __read_mostly;
177 static struct kmem_cache *trie_leaf_kmem __read_mostly;
178
179 static inline struct tnode *node_parent(struct node *node)
180 {
181 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
182 }
183
184 static inline struct tnode *node_parent_rcu(struct node *node)
185 {
186 struct tnode *ret = node_parent(node);
187
188 return rcu_dereference(ret);
189 }
190
191 /* Same as rcu_assign_pointer(),
192 * but that macro assumes the value is a pointer.
193 */
194 static inline void node_set_parent(struct node *node, struct tnode *ptr)
195 {
196 smp_wmb();
197 node->parent = (unsigned long)ptr | NODE_TYPE(node);
198 }
199
200 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
201 {
202 BUG_ON(i >= 1U << tn->bits);
203
204 return tn->child[i];
205 }
206
207 static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
208 {
209 struct node *ret = tnode_get_child(tn, i);
210
211 return rcu_dereference(ret);
212 }
213
214 static inline int tnode_child_length(const struct tnode *tn)
215 {
216 return 1 << tn->bits;
217 }
218
219 static inline t_key mask_pfx(t_key k, unsigned short l)
220 {
221 return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
222 }
223
224 static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
225 {
226 if (offset < KEYLENGTH)
227 return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
228 else
229 return 0;
230 }
231
232 static inline int tkey_equals(t_key a, t_key b)
233 {
234 return a == b;
235 }
236
237 static inline int tkey_sub_equals(t_key a, int offset, int bits, t_key b)
238 {
239 if (bits == 0 || offset >= KEYLENGTH)
240 return 1;
241 bits = bits > KEYLENGTH ? KEYLENGTH : bits;
242 return ((a ^ b) << offset) >> (KEYLENGTH - bits) == 0;
243 }
244
245 static inline int tkey_mismatch(t_key a, int offset, t_key b)
246 {
247 t_key diff = a ^ b;
248 int i = offset;
249
250 if (!diff)
251 return 0;
252 while ((diff << i) >> (KEYLENGTH-1) == 0)
253 i++;
254 return i;
255 }
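
/*
 * Worked example (a hypothetical helper, illustrative only and not
 * compiled): concrete values for the bit helpers above, KEYLENGTH = 32.
 */
#if 0
static void tkey_helpers_example(void)
{
	/* Keep the top 8 bits of the key, clear the rest: 0xde000000 */
	t_key pfx = mask_pfx(0xdeadbeef, 8);

	/* Top 3 bits of 0xa0000000 (binary 101...): index 5 */
	t_key idx = tkey_extract_bits(0xa0000000, 0, 3);

	/* 0xc0000000 and 0x80000000 first differ at bit 1 (from the MSB) */
	int mis = tkey_mismatch(0xc0000000, 0, 0x80000000);

	(void)pfx; (void)idx; (void)mis;
}
#endif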
256
257 /*
258 To understand this stuff, an understanding of keys and all their bits is
259 necessary. Every node in the trie has a key associated with it, but not
260 all of the bits in that key are significant.
261
262 Consider a node 'n' and its parent 'tp'.
263
264 If n is a leaf, every bit in its key is significant. Its presence is
265 necessitated by path compression, since during a tree traversal (when
266 searching for a leaf - unless we are doing an insertion) we will completely
267 ignore all skipped bits we encounter. Thus we need to verify, at the end of
268 a potentially successful search, that we have indeed been walking the
269 correct key path.
270
271 Note that, if a key is present in the tree, we can never "miss" it by
272 following the wrong path. Path compression ensures that segments of the key
273 that are the same for all keys with a given prefix are skipped, but the
274 skipped part *is* identical for each node in the subtrie below the skipped
275 bit! trie_insert() in this implementation takes care of that - note the
276 call to tkey_sub_equals() in trie_insert().
277
278 If n is an internal node - a 'tnode' here - the various parts of its key
279 have many different meanings.
280
281 Example:
282 _________________________________________________________________
283 | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
284 -----------------------------------------------------------------
285 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
286
287 _________________________________________________________________
288 | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
289 -----------------------------------------------------------------
290 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
291
292 tp->pos = 7
293 tp->bits = 3
294 n->pos = 15
295 n->bits = 4
296
297 First, let's just ignore the bits that come before the parent tp, that is
298 the bits from 0 to (tp->pos-1). They are *known* but at this point we do
299 not use them for anything.
300
301 The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
302 index into the parent's child array. That is, they will be used to find
303 'n' among tp's children.
304
305 The bits from (tp->pos + tp->bits) to (n->pos - 1) - "S" - are skipped bits
306 for the node n.
307
308 All the bits we have seen so far are significant to the node n. The rest
309 of the bits are really not needed or indeed known in n->key.
310
311 The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
312 n's child array, and will of course be different for each child.
313
314
315 The rest of the bits, from (n->pos + n->bits) onward, are completely unknown
316 at this point.
317
318 */
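
/*
 * To tie the example above to the helpers: with tp->pos = 7 and
 * tp->bits = 3, the "N" bits that locate n in tp's child array are
 * tkey_extract_bits(n->key, 7, 3); the "C" bits that index n's own
 * children are tkey_extract_bits(key, 15, 4); and the skipped "S"
 * bits (10..14) are only re-checked with tkey_sub_equals() when a
 * walk needs to verify the path.
 */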
319
320 static inline void check_tnode(const struct tnode *tn)
321 {
322 WARN_ON(tn && tn->pos+tn->bits > 32);
323 }
324
325 static const int halve_threshold = 25;
326 static const int inflate_threshold = 50;
327 static const int halve_threshold_root = 15;
328 static const int inflate_threshold_root = 25;
329
330 static int inflate_threshold_root_fix;
331 #define INFLATE_FIX_MAX 10 /* see the comment in resize() */
332
333 static void __alias_free_mem(struct rcu_head *head)
334 {
335 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
336 kmem_cache_free(fn_alias_kmem, fa);
337 }
338
339 static inline void alias_free_mem_rcu(struct fib_alias *fa)
340 {
341 call_rcu(&fa->rcu, __alias_free_mem);
342 }
343
344 static void __leaf_free_rcu(struct rcu_head *head)
345 {
346 struct leaf *l = container_of(head, struct leaf, rcu);
347 kmem_cache_free(trie_leaf_kmem, l);
348 }
349
350 static inline void free_leaf(struct leaf *l)
351 {
352 call_rcu_bh(&l->rcu, __leaf_free_rcu);
353 }
354
355 static void __leaf_info_free_rcu(struct rcu_head *head)
356 {
357 kfree(container_of(head, struct leaf_info, rcu));
358 }
359
360 static inline void free_leaf_info(struct leaf_info *leaf)
361 {
362 call_rcu(&leaf->rcu, __leaf_info_free_rcu);
363 }
364
365 static struct tnode *tnode_alloc(size_t size)
366 {
367 if (size <= PAGE_SIZE)
368 return kzalloc(size, GFP_KERNEL);
369 else
370 return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
371 }
372
373 static void __tnode_vfree(struct work_struct *arg)
374 {
375 struct tnode *tn = container_of(arg, struct tnode, work);
376 vfree(tn);
377 }
378
379 static void __tnode_free_rcu(struct rcu_head *head)
380 {
381 struct tnode *tn = container_of(head, struct tnode, rcu);
382 size_t size = sizeof(struct tnode) +
383 (sizeof(struct node *) << tn->bits);
384
385 if (size <= PAGE_SIZE)
386 kfree(tn);
387 else {
388 INIT_WORK(&tn->work, __tnode_vfree);
389 schedule_work(&tn->work);
390 }
391 }
392
393 static inline void tnode_free(struct tnode *tn)
394 {
395 if (IS_LEAF(tn))
396 free_leaf((struct leaf *) tn);
397 else
398 call_rcu(&tn->rcu, __tnode_free_rcu);
399 }
400
401 static void tnode_free_safe(struct tnode *tn)
402 {
403 BUG_ON(IS_LEAF(tn));
404 tn->tnode_free = tnode_free_head;
405 tnode_free_head = tn;
406 tnode_free_size += sizeof(struct tnode) +
407 (sizeof(struct node *) << tn->bits);
408 }
409
410 static void tnode_free_flush(void)
411 {
412 struct tnode *tn;
413
414 while ((tn = tnode_free_head)) {
415 tnode_free_head = tn->tnode_free;
416 tn->tnode_free = NULL;
417 tnode_free(tn);
418 }
419
420 if (tnode_free_size >= PAGE_SIZE * sync_pages) {
421 tnode_free_size = 0;
422 synchronize_rcu();
423 }
424 }
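
/*
 * Usage pattern for the two helpers above (a sketch only; this mirrors
 * what trie_rebalance() does further down): queue replaced tnodes with
 * tnode_free_safe() while rewriting a subtree under RTNL, publish the
 * new subtree with rcu_assign_pointer(), and only then flush, so RCU
 * readers never see a freed node.
 */
#if 0
static void rewrite_subtree_sketch(struct trie *t, struct tnode *tn)
{
	struct node *new_subtree = resize(t, tn); /* queues old tnodes */

	rcu_assign_pointer(t->trie, new_subtree); /* publish first */
	tnode_free_flush();                       /* then free old nodes */
}
#endif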
425
426 static struct leaf *leaf_new(void)
427 {
428 struct leaf *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
429 if (l) {
430 l->parent = T_LEAF;
431 INIT_HLIST_HEAD(&l->list);
432 }
433 return l;
434 }
435
436 static struct leaf_info *leaf_info_new(int plen)
437 {
438 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
439 if (li) {
440 li->plen = plen;
441 INIT_LIST_HEAD(&li->falh);
442 }
443 return li;
444 }
445
446 static struct tnode *tnode_new(t_key key, int pos, int bits)
447 {
448 size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
449 struct tnode *tn = tnode_alloc(sz);
450
451 if (tn) {
452 tn->parent = T_TNODE;
453 tn->pos = pos;
454 tn->bits = bits;
455 tn->key = key;
456 tn->full_children = 0;
457 tn->empty_children = 1<<bits;
458 }
459
460 pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
461 (unsigned long) (sizeof(struct node) << bits));
462 return tn;
463 }
464
465 /*
466 * Check whether a tnode 'n' is "full", i.e. it is an internal node
467 * and no bits are skipped. See discussion in dyntree paper p. 6
468 */
469
470 static inline int tnode_full(const struct tnode *tn, const struct node *n)
471 {
472 if (n == NULL || IS_LEAF(n))
473 return 0;
474
475 return ((struct tnode *) n)->pos == tn->pos + tn->bits;
476 }
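
/*
 * Example: in the key diagram near the top of this file, a child of
 * tp (pos 7, bits 3) is "full" only if it is a tnode with pos == 10,
 * i.e. it skips no bits; the node n shown there (pos 15) is not full,
 * because bits 10..14 are skipped.
 */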
477
478 static inline void put_child(struct trie *t, struct tnode *tn, int i,
479 struct node *n)
480 {
481 tnode_put_child_reorg(tn, i, n, -1);
482 }
483
484 /*
485 * Add a child at position i overwriting the old value.
486 * Update the value of full_children and empty_children.
487 */
488
489 static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
490 int wasfull)
491 {
492 struct node *chi = tn->child[i];
493 int isfull;
494
495 BUG_ON(i >= 1<<tn->bits);
496
497 /* update empty_children */
498 if (n == NULL && chi != NULL)
499 tn->empty_children++;
500 else if (n != NULL && chi == NULL)
501 tn->empty_children--;
502
503 /* update full_children */
504 if (wasfull == -1)
505 wasfull = tnode_full(tn, chi);
506
507 isfull = tnode_full(tn, n);
508 if (wasfull && !isfull)
509 tn->full_children--;
510 else if (!wasfull && isfull)
511 tn->full_children++;
512
513 if (n)
514 node_set_parent(n, tn);
515
516 rcu_assign_pointer(tn->child[i], n);
517 }
518
519 static struct node *resize(struct trie *t, struct tnode *tn)
520 {
521 int i;
522 int err = 0;
523 struct tnode *old_tn;
524 int inflate_threshold_use;
525 int halve_threshold_use;
526 int max_resize;
527
528 if (!tn)
529 return NULL;
530
531 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
532 tn, inflate_threshold, halve_threshold);
533
534 /* No children */
535 if (tn->empty_children == tnode_child_length(tn)) {
536 tnode_free_safe(tn);
537 return NULL;
538 }
539 /* One child */
540 if (tn->empty_children == tnode_child_length(tn) - 1)
541 for (i = 0; i < tnode_child_length(tn); i++) {
542 struct node *n;
543
544 n = tn->child[i];
545 if (!n)
546 continue;
547
548 /* compress one level */
549 node_set_parent(n, NULL);
550 tnode_free_safe(tn);
551 return n;
552 }
553 /*
554 * Double as long as the resulting node's ratio of non-empty
555 * children stays above the threshold.
556 */
557
558 /*
559 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
560 * the Helsinki University of Technology and Matti Tikkanen of Nokia
561 * Telecommunications, page 6:
562 * "A node is doubled if the ratio of non-empty children to all
563 * children in the *doubled* node is at least 'high'."
564 *
565 * 'high' in this instance is the variable 'inflate_threshold'. It
566 * is expressed as a percentage, so the comparison carries a factor
567 * of 100 on the left-hand side and, because inflate() doubles the
568 * child array, a factor of 2 on the right-hand side. Dividing
569 * both sides by 2 leaves the "50 * ..." form that appears in
570 * the loop below.
571 *
572 * The left-hand side may look a bit weird: tnode_child_length(tn)
573 * - tn->empty_children is of course the number of non-null children
574 * in the current node. tn->full_children is the number of "full"
575 * children, that is non-null tnodes with a skip value of 0.
576 * All of those will be doubled in the resulting inflated tnode, so
577 * we just count them one extra time here.
578 *
579 * A clearer way to write this would be:
580 *
581 * to_be_doubled = tn->full_children;
582 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
583 * tn->full_children;
584 *
585 * new_child_length = tnode_child_length(tn) * 2;
586 *
587 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
588 * new_child_length;
589 * if (new_fill_factor >= inflate_threshold)
590 *
591 * ...and so on, though it would mess up the while () loop.
592 *
593 * anyway,
594 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
595 * inflate_threshold
596 *
597 * avoid a division:
598 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
599 * inflate_threshold * new_child_length
600 *
601 * expand not_to_be_doubled and to_be_doubled, and shorten:
602 * 100 * (tnode_child_length(tn) - tn->empty_children +
603 * tn->full_children) >= inflate_threshold * new_child_length
604 *
605 * expand new_child_length:
606 * 100 * (tnode_child_length(tn) - tn->empty_children +
607 * tn->full_children) >=
608 * inflate_threshold * tnode_child_length(tn) * 2
609 *
610 * shorten again:
611 * 50 * (tn->full_children + tnode_child_length(tn) -
612 * tn->empty_children) >= inflate_threshold *
613 * tnode_child_length(tn)
614 *
615 */
616
617 check_tnode(tn);
618
619 /* Keep root node larger */
620
621 if (!tn->parent)
622 inflate_threshold_use = inflate_threshold_root +
623 inflate_threshold_root_fix;
624 else
625 inflate_threshold_use = inflate_threshold;
626
627 err = 0;
628 max_resize = 10;
629 while ((tn->full_children > 0 && max_resize-- &&
630 50 * (tn->full_children + tnode_child_length(tn)
631 - tn->empty_children)
632 >= inflate_threshold_use * tnode_child_length(tn))) {
633
634 old_tn = tn;
635 tn = inflate(t, tn);
636
637 if (IS_ERR(tn)) {
638 tn = old_tn;
639 #ifdef CONFIG_IP_FIB_TRIE_STATS
640 t->stats.resize_node_skipped++;
641 #endif
642 break;
643 }
644 }
645
646 if (max_resize < 0) {
647 if (!tn->parent) {
648 /*
649 * It was observed that during large updates even
650 * inflate_threshold_root = 35 might be needed to avoid
651 * this warning; but it should be temporary, so let's
652 * try to handle this automatically.
653 */
654 if (inflate_threshold_root_fix < INFLATE_FIX_MAX)
655 inflate_threshold_root_fix++;
656 else
657 pr_warning("Fix inflate_threshold_root."
658 " Now=%d size=%d bits fix=%d\n",
659 inflate_threshold_root, tn->bits,
660 inflate_threshold_root_fix);
661 } else {
662 pr_warning("Fix inflate_threshold."
663 " Now=%d size=%d bits\n",
664 inflate_threshold, tn->bits);
665 }
666 } else if (max_resize > 3 && !tn->parent && inflate_threshold_root_fix)
667 inflate_threshold_root_fix--;
668
669 check_tnode(tn);
670
671 /*
672 * Halve as long as the number of empty children in this
673 * node is above threshold.
674 */
675
676
677 /* Keep root node larger */
678
679 if (!tn->parent)
680 halve_threshold_use = halve_threshold_root;
681 else
682 halve_threshold_use = halve_threshold;
683
684 err = 0;
685 max_resize = 10;
686 while (tn->bits > 1 && max_resize-- &&
687 100 * (tnode_child_length(tn) - tn->empty_children) <
688 halve_threshold_use * tnode_child_length(tn)) {
689
690 old_tn = tn;
691 tn = halve(t, tn);
692 if (IS_ERR(tn)) {
693 tn = old_tn;
694 #ifdef CONFIG_IP_FIB_TRIE_STATS
695 t->stats.resize_node_skipped++;
696 #endif
697 break;
698 }
699 }
700
701 if (max_resize < 0) {
702 if (!tn->parent)
703 pr_warning("Fix halve_threshold_root."
704 " Now=%d size=%d bits\n",
705 halve_threshold_root, tn->bits);
706 else
707 pr_warning("Fix halve_threshold."
708 " Now=%d size=%d bits\n",
709 halve_threshold, tn->bits);
710 }
711
712 /* Only one child remains */
713 if (tn->empty_children == tnode_child_length(tn) - 1)
714 for (i = 0; i < tnode_child_length(tn); i++) {
715 struct node *n;
716
717 n = tn->child[i];
718 if (!n)
719 continue;
720
721 /* compress one level */
722
723 node_set_parent(n, NULL);
724 tnode_free_safe(tn);
725 return n;
726 }
727
728 return (struct node *) tn;
729 }
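
/*
 * The final inequality from the derivation inside resize(), pulled out
 * as a standalone predicate (a sketch only; the loop in resize() above
 * additionally requires at least one full child and bounds the number
 * of retries):
 */
#if 0
static int should_inflate_sketch(const struct tnode *tn, int threshold)
{
	int used = tnode_child_length(tn) - tn->empty_children
		   + tn->full_children;

	return 50 * used >= threshold * tnode_child_length(tn);
}
#endif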
730
731 static struct tnode *inflate(struct trie *t, struct tnode *tn)
732 {
733 struct tnode *oldtnode = tn;
734 int olen = tnode_child_length(tn);
735 int i;
736
737 pr_debug("In inflate\n");
738
739 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits + 1);
740
741 if (!tn)
742 return ERR_PTR(-ENOMEM);
743
744 /*
745 * Preallocate and store tnodes before the actual work so we
746 * don't get into an inconsistent state if memory allocation
747 * fails. On failure we return an error pointer and the caller
748 * keeps using the old tnode.
749 */
750
751 for (i = 0; i < olen; i++) {
752 struct tnode *inode;
753
754 inode = (struct tnode *) tnode_get_child(oldtnode, i);
755 if (inode &&
756 IS_TNODE(inode) &&
757 inode->pos == oldtnode->pos + oldtnode->bits &&
758 inode->bits > 1) {
759 struct tnode *left, *right;
760 t_key m = ~0U << (KEYLENGTH - 1) >> inode->pos;
761
762 left = tnode_new(inode->key&(~m), inode->pos + 1,
763 inode->bits - 1);
764 if (!left)
765 goto nomem;
766
767 right = tnode_new(inode->key|m, inode->pos + 1,
768 inode->bits - 1);
769
770 if (!right) {
771 tnode_free(left);
772 goto nomem;
773 }
774
775 put_child(t, tn, 2*i, (struct node *) left);
776 put_child(t, tn, 2*i+1, (struct node *) right);
777 }
778 }
779
780 for (i = 0; i < olen; i++) {
781 struct tnode *inode;
782 struct node *node = tnode_get_child(oldtnode, i);
783 struct tnode *left, *right;
784 int size, j;
785
786 /* An empty child */
787 if (node == NULL)
788 continue;
789
790 /* A leaf or an internal node with skipped bits */
791
792 if (IS_LEAF(node) || ((struct tnode *) node)->pos >
793 tn->pos + tn->bits - 1) {
794 if (tkey_extract_bits(node->key,
795 oldtnode->pos + oldtnode->bits,
796 1) == 0)
797 put_child(t, tn, 2*i, node);
798 else
799 put_child(t, tn, 2*i+1, node);
800 continue;
801 }
802
803 /* An internal node with two children */
804 inode = (struct tnode *) node;
805
806 if (inode->bits == 1) {
807 put_child(t, tn, 2*i, inode->child[0]);
808 put_child(t, tn, 2*i+1, inode->child[1]);
809
810 tnode_free_safe(inode);
811 continue;
812 }
813
814 /* An internal node with more than two children */
815
816 /* We will replace this node 'inode' with two new
817 * ones, 'left' and 'right', each with half of the
818 * original children. The two new nodes will have
819 * a position one bit further down the key and this
820 * means that the "significant" part of their keys
821 * (see the discussion near the top of this file)
822 * will differ by one bit, which will be "0" in
823 * left's key and "1" in right's key. Since we are
824 * moving the key position by one step, the bit that
825 * we are moving away from - the bit at position
826 * (inode->pos) - is the one that will differ between
827 * left and right. So... we synthesize that bit in the
828 * two new keys.
829 * The mask 'm' below will be a single "one" bit at
830 * the position (inode->pos)
831 */
832
833 /* Use the old key, but set the new significant
834 * bit to zero.
835 */
836
837 left = (struct tnode *) tnode_get_child(tn, 2*i);
838 put_child(t, tn, 2*i, NULL);
839
840 BUG_ON(!left);
841
842 right = (struct tnode *) tnode_get_child(tn, 2*i+1);
843 put_child(t, tn, 2*i+1, NULL);
844
845 BUG_ON(!right);
846
847 size = tnode_child_length(left);
848 for (j = 0; j < size; j++) {
849 put_child(t, left, j, inode->child[j]);
850 put_child(t, right, j, inode->child[j + size]);
851 }
852 put_child(t, tn, 2*i, resize(t, left));
853 put_child(t, tn, 2*i+1, resize(t, right));
854
855 tnode_free_safe(inode);
856 }
857 tnode_free_safe(oldtnode);
858 return tn;
859 nomem:
860 {
861 int size = tnode_child_length(tn);
862 int j;
863
864 for (j = 0; j < size; j++)
865 if (tn->child[j])
866 tnode_free((struct tnode *)tn->child[j]);
867
868 tnode_free(tn);
869
870 return ERR_PTR(-ENOMEM);
871 }
872 }
873
874 static struct tnode *halve(struct trie *t, struct tnode *tn)
875 {
876 struct tnode *oldtnode = tn;
877 struct node *left, *right;
878 int i;
879 int olen = tnode_child_length(tn);
880
881 pr_debug("In halve\n");
882
883 tn = tnode_new(oldtnode->key, oldtnode->pos, oldtnode->bits - 1);
884
885 if (!tn)
886 return ERR_PTR(-ENOMEM);
887
888 /*
889 * Preallocate and store tnodes before the actual work so we
890 * don't get into an inconsistent state if memory allocation
891 * fails. On failure we return an error pointer and the caller
892 * keeps using the old tnode.
893 */
894
895 for (i = 0; i < olen; i += 2) {
896 left = tnode_get_child(oldtnode, i);
897 right = tnode_get_child(oldtnode, i+1);
898
899 /* Two nonempty children */
900 if (left && right) {
901 struct tnode *newn;
902
903 newn = tnode_new(left->key, tn->pos + tn->bits, 1);
904
905 if (!newn)
906 goto nomem;
907
908 put_child(t, tn, i/2, (struct node *)newn);
909 }
910
911 }
912
913 for (i = 0; i < olen; i += 2) {
914 struct tnode *newBinNode;
915
916 left = tnode_get_child(oldtnode, i);
917 right = tnode_get_child(oldtnode, i+1);
918
919 /* At least one of the children is empty */
920 if (left == NULL) {
921 if (right == NULL) /* Both are empty */
922 continue;
923 put_child(t, tn, i/2, right);
924 continue;
925 }
926
927 if (right == NULL) {
928 put_child(t, tn, i/2, left);
929 continue;
930 }
931
932 /* Two nonempty children */
933 newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
934 put_child(t, tn, i/2, NULL);
935 put_child(t, newBinNode, 0, left);
936 put_child(t, newBinNode, 1, right);
937 put_child(t, tn, i/2, resize(t, newBinNode));
938 }
939 tnode_free_safe(oldtnode);
940 return tn;
941 nomem:
942 {
943 int size = tnode_child_length(tn);
944 int j;
945
946 for (j = 0; j < size; j++)
947 if (tn->child[j])
948 tnode_free((struct tnode *)tn->child[j]);
949
950 tnode_free(tn);
951
952 return ERR_PTR(-ENOMEM);
953 }
954 }
955
956 /* The read side must use rcu_read_lock(); currently that means the
957 dump routines, via get_fa_head() and dump */
958
959 static struct leaf_info *find_leaf_info(struct leaf *l, int plen)
960 {
961 struct hlist_head *head = &l->list;
962 struct hlist_node *node;
963 struct leaf_info *li;
964
965 hlist_for_each_entry_rcu(li, node, head, hlist)
966 if (li->plen == plen)
967 return li;
968
969 return NULL;
970 }
971
972 static inline struct list_head *get_fa_head(struct leaf *l, int plen)
973 {
974 struct leaf_info *li = find_leaf_info(l, plen);
975
976 if (!li)
977 return NULL;
978
979 return &li->falh;
980 }
981
982 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
983 {
984 struct leaf_info *li = NULL, *last = NULL;
985 struct hlist_node *node;
986
987 if (hlist_empty(head)) {
988 hlist_add_head_rcu(&new->hlist, head);
989 } else {
990 hlist_for_each_entry(li, node, head, hlist) {
991 if (new->plen > li->plen)
992 break;
993
994 last = li;
995 }
996 if (last)
997 hlist_add_after_rcu(&last->hlist, &new->hlist);
998 else
999 hlist_add_before_rcu(&new->hlist, &li->hlist);
1000 }
1001 }
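
/*
 * Example: inserting leaf_infos with plen 16, then 24, then 8 leaves
 * the hlist ordered 24 -> 16 -> 8, i.e. sorted by descending prefix
 * length, so walkers see the most specific prefix first.
 */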
1002
1003 /* rcu_read_lock needs to be held by the caller on the read side */
1004
1005 static struct leaf *
1006 fib_find_node(struct trie *t, u32 key)
1007 {
1008 int pos;
1009 struct tnode *tn;
1010 struct node *n;
1011
1012 pos = 0;
1013 n = rcu_dereference(t->trie);
1014
1015 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1016 tn = (struct tnode *) n;
1017
1018 check_tnode(tn);
1019
1020 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1021 pos = tn->pos + tn->bits;
1022 n = tnode_get_child_rcu(tn,
1023 tkey_extract_bits(key,
1024 tn->pos,
1025 tn->bits));
1026 } else
1027 break;
1028 }
1029 /* Case we have found a leaf. Compare prefixes */
1030
1031 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key))
1032 return (struct leaf *)n;
1033
1034 return NULL;
1035 }
1036
1037 static void trie_rebalance(struct trie *t, struct tnode *tn)
1038 {
1039 int wasfull;
1040 t_key cindex, key;
1041 struct tnode *tp;
1042
1043 key = tn->key;
1044
1045 while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
1046 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1047 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
1048 tn = (struct tnode *) resize(t, (struct tnode *)tn);
1049
1050 tnode_put_child_reorg((struct tnode *)tp, cindex,
1051 (struct node *)tn, wasfull);
1052
1053 tp = node_parent((struct node *) tn);
1054 if (!tp)
1055 rcu_assign_pointer(t->trie, (struct node *)tn);
1056
1057 tnode_free_flush();
1058 if (!tp)
1059 break;
1060 tn = tp;
1061 }
1062
1063 /* Handle last (top) tnode */
1064 if (IS_TNODE(tn))
1065 tn = (struct tnode *)resize(t, (struct tnode *)tn);
1066
1067 rcu_assign_pointer(t->trie, (struct node *)tn);
1068 tnode_free_flush();
1069
1070 return;
1071 }
1072
1073 /* only used from updater-side */
1074
1075 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1076 {
1077 int pos, newpos;
1078 struct tnode *tp = NULL, *tn = NULL;
1079 struct node *n;
1080 struct leaf *l;
1081 int missbit;
1082 struct list_head *fa_head = NULL;
1083 struct leaf_info *li;
1084 t_key cindex;
1085
1086 pos = 0;
1087 n = t->trie;
1088
1089 /* If we point to NULL, stop. Either the tree is empty and we should
1090 * just put a new leaf in it, or we have reached an empty child slot,
1091 * and we should just put our new leaf in that.
1092 * If we point to a T_TNODE, check if it matches our key. Note that
1093 * a T_TNODE might be skipping any number of bits - its 'pos' need
1094 * not be the parent's 'pos'+'bits'!
1095 *
1096 * If it does match the current key, get pos/bits from it, extract
1097 * the index from our key, push the T_TNODE and walk the tree.
1098 *
1099 * If it doesn't, we have to replace it with a new T_TNODE.
1100 *
1101 * If we point to a T_LEAF, it might or might not have the same key
1102 * as we do. If it does, just update the T_LEAF's
1103 * value and return it.
1104 * If it doesn't, we need to replace it with a T_TNODE.
1105 */
1106
1107 while (n != NULL && NODE_TYPE(n) == T_TNODE) {
1108 tn = (struct tnode *) n;
1109
1110 check_tnode(tn);
1111
1112 if (tkey_sub_equals(tn->key, pos, tn->pos-pos, key)) {
1113 tp = tn;
1114 pos = tn->pos + tn->bits;
1115 n = tnode_get_child(tn,
1116 tkey_extract_bits(key,
1117 tn->pos,
1118 tn->bits));
1119
1120 BUG_ON(n && node_parent(n) != tn);
1121 } else
1122 break;
1123 }
1124
1125 /*
1126 * n ----> NULL, LEAF or TNODE
1127 *
1128 * tp is n's (parent) ----> NULL or TNODE
1129 */
1130
1131 BUG_ON(tp && IS_LEAF(tp));
1132
1133 /* Case 1: n is a leaf. Compare prefixes */
1134
1135 if (n != NULL && IS_LEAF(n) && tkey_equals(key, n->key)) {
1136 l = (struct leaf *) n;
1137 li = leaf_info_new(plen);
1138
1139 if (!li)
1140 return NULL;
1141
1142 fa_head = &li->falh;
1143 insert_leaf_info(&l->list, li);
1144 goto done;
1145 }
1146 l = leaf_new();
1147
1148 if (!l)
1149 return NULL;
1150
1151 l->key = key;
1152 li = leaf_info_new(plen);
1153
1154 if (!li) {
1155 free_leaf(l);
1156 return NULL;
1157 }
1158
1159 fa_head = &li->falh;
1160 insert_leaf_info(&l->list, li);
1161
1162 if (t->trie && n == NULL) {
1163 /* Case 2: n is NULL, and will just insert a new leaf */
1164
1165 node_set_parent((struct node *)l, tp);
1166
1167 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1168 put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
1169 } else {
1170 /* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
1171 /*
1172 * Add a new tnode here; the first tnode in the
1173 * trie needs some special handling.
1174 */
1175
1176 if (tp)
1177 pos = tp->pos+tp->bits;
1178 else
1179 pos = 0;
1180
1181 if (n) {
1182 newpos = tkey_mismatch(key, pos, n->key);
1183 tn = tnode_new(n->key, newpos, 1);
1184 } else {
1185 newpos = 0;
1186 tn = tnode_new(key, newpos, 1); /* First tnode */
1187 }
1188
1189 if (!tn) {
1190 free_leaf_info(li);
1191 free_leaf(l);
1192 return NULL;
1193 }
1194
1195 node_set_parent((struct node *)tn, tp);
1196
1197 missbit = tkey_extract_bits(key, newpos, 1);
1198 put_child(t, tn, missbit, (struct node *)l);
1199 put_child(t, tn, 1-missbit, n);
1200
1201 if (tp) {
1202 cindex = tkey_extract_bits(key, tp->pos, tp->bits);
1203 put_child(t, (struct tnode *)tp, cindex,
1204 (struct node *)tn);
1205 } else {
1206 rcu_assign_pointer(t->trie, (struct node *)tn);
1207 tp = tn;
1208 }
1209 }
1210
1211 if (tp && tp->pos + tp->bits > 32)
1212 pr_warning("fib_trie"
1213 " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n",
1214 tp, tp->pos, tp->bits, key, plen);
1215
1216 /* Rebalance the trie */
1217
1218 trie_rebalance(t, tp);
1219 done:
1220 return fa_head;
1221 }
1222
1223 /*
1224 * Caller must hold RTNL.
1225 */
1226 static int fn_trie_insert(struct fib_table *tb, struct fib_config *cfg)
1227 {
1228 struct trie *t = (struct trie *) tb->tb_data;
1229 struct fib_alias *fa, *new_fa;
1230 struct list_head *fa_head = NULL;
1231 struct fib_info *fi;
1232 int plen = cfg->fc_dst_len;
1233 u8 tos = cfg->fc_tos;
1234 u32 key, mask;
1235 int err;
1236 struct leaf *l;
1237
1238 if (plen > 32)
1239 return -EINVAL;
1240
1241 key = ntohl(cfg->fc_dst);
1242
1243 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1244
1245 mask = ntohl(inet_make_mask(plen));
1246
1247 if (key & ~mask)
1248 return -EINVAL;
1249
1250 key = key & mask;
1251
1252 fi = fib_create_info(cfg);
1253 if (IS_ERR(fi)) {
1254 err = PTR_ERR(fi);
1255 goto err;
1256 }
1257
1258 l = fib_find_node(t, key);
1259 fa = NULL;
1260
1261 if (l) {
1262 fa_head = get_fa_head(l, plen);
1263 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1264 }
1265
1266 /* Now fa, if non-NULL, points to the first fib alias
1267 * with the same keys [prefix,tos,priority], if such key already
1268 * exists or to the node before which we will insert new one.
1269 *
1270 * If fa is NULL, we will need to allocate a new one and
1271 * insert it at the head of fa_head.
1272 *
1273 * If fa_head is NULL, no fib node matched the destination key
1274 * and we need to allocate a new one of those as well.
1275 */
1276
1277 if (fa && fa->fa_tos == tos &&
1278 fa->fa_info->fib_priority == fi->fib_priority) {
1279 struct fib_alias *fa_first, *fa_match;
1280
1281 err = -EEXIST;
1282 if (cfg->fc_nlflags & NLM_F_EXCL)
1283 goto out;
1284
1285 /* We have 2 goals:
1286 * 1. Find exact match for type, scope, fib_info to avoid
1287 * duplicate routes
1288 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1289 */
1290 fa_match = NULL;
1291 fa_first = fa;
1292 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1293 list_for_each_entry_continue(fa, fa_head, fa_list) {
1294 if (fa->fa_tos != tos)
1295 break;
1296 if (fa->fa_info->fib_priority != fi->fib_priority)
1297 break;
1298 if (fa->fa_type == cfg->fc_type &&
1299 fa->fa_scope == cfg->fc_scope &&
1300 fa->fa_info == fi) {
1301 fa_match = fa;
1302 break;
1303 }
1304 }
1305
1306 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1307 struct fib_info *fi_drop;
1308 u8 state;
1309
1310 fa = fa_first;
1311 if (fa_match) {
1312 if (fa == fa_match)
1313 err = 0;
1314 goto out;
1315 }
1316 err = -ENOBUFS;
1317 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1318 if (new_fa == NULL)
1319 goto out;
1320
1321 fi_drop = fa->fa_info;
1322 new_fa->fa_tos = fa->fa_tos;
1323 new_fa->fa_info = fi;
1324 new_fa->fa_type = cfg->fc_type;
1325 new_fa->fa_scope = cfg->fc_scope;
1326 state = fa->fa_state;
1327 new_fa->fa_state = state & ~FA_S_ACCESSED;
1328
1329 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1330 alias_free_mem_rcu(fa);
1331
1332 fib_release_info(fi_drop);
1333 if (state & FA_S_ACCESSED)
1334 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1335 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1336 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1337
1338 goto succeeded;
1339 }
1340 /* Error if we find a perfect match which
1341 * uses the same scope, type, and nexthop
1342 * information.
1343 */
1344 if (fa_match)
1345 goto out;
1346
1347 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1348 fa = fa_first;
1349 }
1350 err = -ENOENT;
1351 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1352 goto out;
1353
1354 err = -ENOBUFS;
1355 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1356 if (new_fa == NULL)
1357 goto out;
1358
1359 new_fa->fa_info = fi;
1360 new_fa->fa_tos = tos;
1361 new_fa->fa_type = cfg->fc_type;
1362 new_fa->fa_scope = cfg->fc_scope;
1363 new_fa->fa_state = 0;
1364 /*
1365 * Insert new entry to the list.
1366 */
1367
1368 if (!fa_head) {
1369 fa_head = fib_insert_node(t, key, plen);
1370 if (unlikely(!fa_head)) {
1371 err = -ENOMEM;
1372 goto out_free_new_fa;
1373 }
1374 }
1375
1376 list_add_tail_rcu(&new_fa->fa_list,
1377 (fa ? &fa->fa_list : fa_head));
1378
1379 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1380 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1381 &cfg->fc_nlinfo, 0);
1382 succeeded:
1383 return 0;
1384
1385 out_free_new_fa:
1386 kmem_cache_free(fn_alias_kmem, new_fa);
1387 out:
1388 fib_release_info(fi);
1389 err:
1390 return err;
1391 }
1392
1393 /* should be called with rcu_read_lock */
1394 static int check_leaf(struct trie *t, struct leaf *l,
1395 t_key key, const struct flowi *flp,
1396 struct fib_result *res)
1397 {
1398 struct leaf_info *li;
1399 struct hlist_head *hhead = &l->list;
1400 struct hlist_node *node;
1401
1402 hlist_for_each_entry_rcu(li, node, hhead, hlist) {
1403 int err;
1404 int plen = li->plen;
1405 __be32 mask = inet_make_mask(plen);
1406
1407 if (l->key != (key & ntohl(mask)))
1408 continue;
1409
1410 err = fib_semantic_match(&li->falh, flp, res, plen);
1411
1412 #ifdef CONFIG_IP_FIB_TRIE_STATS
1413 if (err <= 0)
1414 t->stats.semantic_match_passed++;
1415 else
1416 t->stats.semantic_match_miss++;
1417 #endif
1418 if (err <= 0)
1419 return err;
1420 }
1421
1422 return 1;
1423 }
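
/*
 * Worked example for the mask test above: with plen = 24,
 * inet_make_mask(24) is htonl(0xffffff00), so ntohl(mask) is
 * 0xffffff00 and a host-order key for 10.1.2.3 (0x0a010203) is
 * reduced to 0x0a010200 - matching a leaf for 10.1.2.0/24.
 */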
1424
1425 static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
1426 struct fib_result *res)
1427 {
1428 struct trie *t = (struct trie *) tb->tb_data;
1429 int ret;
1430 struct node *n;
1431 struct tnode *pn;
1432 int pos, bits;
1433 t_key key = ntohl(flp->fl4_dst);
1434 int chopped_off;
1435 t_key cindex = 0;
1436 int current_prefix_length = KEYLENGTH;
1437 struct tnode *cn;
1438 t_key node_prefix, key_prefix, pref_mismatch;
1439 int mp;
1440
1441 rcu_read_lock();
1442
1443 n = rcu_dereference(t->trie);
1444 if (!n)
1445 goto failed;
1446
1447 #ifdef CONFIG_IP_FIB_TRIE_STATS
1448 t->stats.gets++;
1449 #endif
1450
1451 /* Just a leaf? */
1452 if (IS_LEAF(n)) {
1453 ret = check_leaf(t, (struct leaf *)n, key, flp, res);
1454 goto found;
1455 }
1456
1457 pn = (struct tnode *) n;
1458 chopped_off = 0;
1459
1460 while (pn) {
1461 pos = pn->pos;
1462 bits = pn->bits;
1463
1464 if (!chopped_off)
1465 cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
1466 pos, bits);
1467
1468 n = tnode_get_child_rcu(pn, cindex);
1469
1470 if (n == NULL) {
1471 #ifdef CONFIG_IP_FIB_TRIE_STATS
1472 t->stats.null_node_hit++;
1473 #endif
1474 goto backtrace;
1475 }
1476
1477 if (IS_LEAF(n)) {
1478 ret = check_leaf(t, (struct leaf *)n, key, flp, res);
1479 if (ret > 0)
1480 goto backtrace;
1481 goto found;
1482 }
1483
1484 cn = (struct tnode *)n;
1485
1486 /*
1487 * It's a tnode, and we can do some extra checks here if we
1488 * like, to avoid descending into a dead-end branch.
1489 * This tnode is in the parent's child array at index
1490 * key[p_pos..p_pos+p_bits] but potentially with some bits
1491 * chopped off, so in reality the index may be just a
1492 * subprefix, padded with zero at the end.
1493 * We can also take a look at any skipped bits in this
1494 * tnode - everything up to p_pos is supposed to be ok,
1495 * and the non-chopped bits of the index (see previous
1496 * paragraph) are also guaranteed ok, but the rest is
1497 * considered unknown.
1498 *
1499 * The skipped bits are key[pos+bits..cn->pos].
1500 */
1501
1502 /* If current_prefix_length < pos+bits, we are already doing
1503 * actual prefix matching, which means everything from
1504 * pos+(bits-chopped_off) onward must be zero along some
1505 * branch of this subtree - otherwise there is *no* valid
1506 * prefix present. Here we can only check the skipped
1507 * bits. Remember, since we have already indexed into the
1508 * parent's child array, we know that the bits we chopped off
1509 * *are* zero.
1510 */
1511
1512 /* NOTA BENE: Checking only skipped bits
1513 for the new node here */
1514
1515 if (current_prefix_length < pos+bits) {
1516 if (tkey_extract_bits(cn->key, current_prefix_length,
1517 cn->pos - current_prefix_length)
1518 || !(cn->child[0]))
1519 goto backtrace;
1520 }
1521
1522 /*
1523 * If chopped_off=0, the index is fully validated and we
1524 * only need to look at the skipped bits for this, the new,
1525 * tnode. What we actually want to do is to find out if
1526 * these skipped bits match our key perfectly, or if we will
1527 * have to count on finding a matching prefix further down,
1528 * because if we do, we would like to have some way of
1529 * verifying the existence of such a prefix at this point.
1530 */
1531
1532 /* The only thing we can do at this point is to verify that
1533 * any such matching prefix can indeed be a prefix to our
1534 * key, and if the bits in the node we are inspecting that
1535 * do not match our key are not ZERO, this cannot be true.
1536 * Thus, find out where there is a mismatch (before cn->pos)
1537 * and verify that all the mismatching bits are zero in the
1538 * new tnode's key.
1539 */
1540
1541 /*
1542 * Note: We aren't very concerned about the bits of
1543 * the key that precede pn->pos+pn->bits, since these
1544 * have already been checked. The bits after cn->pos
1545 * aren't checked since these are by definition
1546 * "unknown" at this point. Thus, what we want to see
1547 * is if we are about to enter the "prefix matching"
1548 * state, and in that case verify that the skipped
1549 * bits that will prevail throughout this subtree are
1550 * zero, as they have to be if we are to find a
1551 * matching prefix.
1552 */
1553
1554 node_prefix = mask_pfx(cn->key, cn->pos);
1555 key_prefix = mask_pfx(key, cn->pos);
1556 pref_mismatch = key_prefix^node_prefix;
1557 mp = 0;
1558
1559 /*
1560 * In short: If skipped bits in this node do not match
1561 * the search key, enter the "prefix matching"
1562 * state directly.
1563 */
1564 if (pref_mismatch) {
1565 while (!(pref_mismatch & (1<<(KEYLENGTH-1)))) {
1566 mp++;
1567 pref_mismatch = pref_mismatch << 1;
1568 }
1569 key_prefix = tkey_extract_bits(cn->key, mp, cn->pos-mp);
1570
1571 if (key_prefix != 0)
1572 goto backtrace;
1573
1574 if (current_prefix_length >= cn->pos)
1575 current_prefix_length = mp;
1576 }
1577
1578 pn = (struct tnode *)n; /* Descend */
1579 chopped_off = 0;
1580 continue;
1581
1582 backtrace:
1583 chopped_off++;
1584
1585 /* Zero bits don't change the child index (cindex), so skip them */
1586 while ((chopped_off <= pn->bits)
1587 && !(cindex & (1<<(chopped_off-1))))
1588 chopped_off++;
1589
1590 /* Decrease current_... with bits chopped off */
1591 if (current_prefix_length > pn->pos + pn->bits - chopped_off)
1592 current_prefix_length = pn->pos + pn->bits
1593 - chopped_off;
1594
1595 /*
1596 * Either we apply the chop to cindex, or, if we have
1597 * chopped off all bits in this tnode, walk up to our parent.
1598 */
1599
1600 if (chopped_off <= pn->bits) {
1601 cindex &= ~(1 << (chopped_off-1));
1602 } else {
1603 struct tnode *parent = node_parent_rcu((struct node *) pn);
1604 if (!parent)
1605 goto failed;
1606
1607 /* Get Child's index */
1608 cindex = tkey_extract_bits(pn->key, parent->pos, parent->bits);
1609 pn = parent;
1610 chopped_off = 0;
1611
1612 #ifdef CONFIG_IP_FIB_TRIE_STATS
1613 t->stats.backtrack++;
1614 #endif
1615 goto backtrace;
1616 }
1617 }
1618 failed:
1619 ret = 1;
1620 found:
1621 rcu_read_unlock();
1622 return ret;
1623 }
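
/*
 * Backtracking example for fn_trie_lookup() above: with pn->bits = 3
 * and cindex = 0b110, bit 0 is already zero so chopped_off skips to 2,
 * and cindex &= ~(1 << 1) yields 0b100 - the next shorter candidate
 * prefix within this tnode. Only when all three bits have been chopped
 * off do we backtrack to the parent tnode.
 */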
1624
1625 /*
1626 * Remove the leaf and rebalance the trie from its parent.
1627 */
1628 static void trie_leaf_remove(struct trie *t, struct leaf *l)
1629 {
1630 struct tnode *tp = node_parent((struct node *) l);
1631
1632 pr_debug("entering trie_leaf_remove(%p)\n", l);
1633
1634 if (tp) {
1635 t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
1636 put_child(t, (struct tnode *)tp, cindex, NULL);
1637 trie_rebalance(t, tp);
1638 } else
1639 rcu_assign_pointer(t->trie, NULL);
1640
1641 free_leaf(l);
1642 }
1643
1644 /*
1645 * Caller must hold RTNL.
1646 */
1647 static int fn_trie_delete(struct fib_table *tb, struct fib_config *cfg)
1648 {
1649 struct trie *t = (struct trie *) tb->tb_data;
1650 u32 key, mask;
1651 int plen = cfg->fc_dst_len;
1652 u8 tos = cfg->fc_tos;
1653 struct fib_alias *fa, *fa_to_delete;
1654 struct list_head *fa_head;
1655 struct leaf *l;
1656 struct leaf_info *li;
1657
1658 if (plen > 32)
1659 return -EINVAL;
1660
1661 key = ntohl(cfg->fc_dst);
1662 mask = ntohl(inet_make_mask(plen));
1663
1664 if (key & ~mask)
1665 return -EINVAL;
1666
1667 key = key & mask;
1668 l = fib_find_node(t, key);
1669
1670 if (!l)
1671 return -ESRCH;
1672
1673 fa_head = get_fa_head(l, plen);
1674 fa = fib_find_alias(fa_head, tos, 0);
1675
1676 if (!fa)
1677 return -ESRCH;
1678
1679 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1680
1681 fa_to_delete = NULL;
1682 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1683 list_for_each_entry_continue(fa, fa_head, fa_list) {
1684 struct fib_info *fi = fa->fa_info;
1685
1686 if (fa->fa_tos != tos)
1687 break;
1688
1689 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1690 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1691 fa->fa_scope == cfg->fc_scope) &&
1692 (!cfg->fc_protocol ||
1693 fi->fib_protocol == cfg->fc_protocol) &&
1694 fib_nh_match(cfg, fi) == 0) {
1695 fa_to_delete = fa;
1696 break;
1697 }
1698 }
1699
1700 if (!fa_to_delete)
1701 return -ESRCH;
1702
1703 fa = fa_to_delete;
1704 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1705 &cfg->fc_nlinfo, 0);
1706
1707 l = fib_find_node(t, key);
1708 li = find_leaf_info(l, plen);
1709
1710 list_del_rcu(&fa->fa_list);
1711
1712 if (list_empty(fa_head)) {
1713 hlist_del_rcu(&li->hlist);
1714 free_leaf_info(li);
1715 }
1716
1717 if (hlist_empty(&l->list))
1718 trie_leaf_remove(t, l);
1719
1720 if (fa->fa_state & FA_S_ACCESSED)
1721 rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
1722
1723 fib_release_info(fa->fa_info);
1724 alias_free_mem_rcu(fa);
1725 return 0;
1726 }
1727
1728 static int trie_flush_list(struct list_head *head)
1729 {
1730 struct fib_alias *fa, *fa_node;
1731 int found = 0;
1732
1733 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1734 struct fib_info *fi = fa->fa_info;
1735
1736 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1737 list_del_rcu(&fa->fa_list);
1738 fib_release_info(fa->fa_info);
1739 alias_free_mem_rcu(fa);
1740 found++;
1741 }
1742 }
1743 return found;
1744 }
1745
1746 static int trie_flush_leaf(struct leaf *l)
1747 {
1748 int found = 0;
1749 struct hlist_head *lih = &l->list;
1750 struct hlist_node *node, *tmp;
1751 struct leaf_info *li = NULL;
1752
1753 hlist_for_each_entry_safe(li, node, tmp, lih, hlist) {
1754 found += trie_flush_list(&li->falh);
1755
1756 if (list_empty(&li->falh)) {
1757 hlist_del_rcu(&li->hlist);
1758 free_leaf_info(li);
1759 }
1760 }
1761 return found;
1762 }
1763
1764 /*
1765 * Scan for the next right leaf starting at node p->child[idx]
1766 * Since we have back pointers, no recursion is necessary.
1767 */
1768 static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
1769 {
1770 do {
1771 t_key idx;
1772
1773 if (c)
1774 idx = tkey_extract_bits(c->key, p->pos, p->bits) + 1;
1775 else
1776 idx = 0;
1777
1778 while (idx < 1u << p->bits) {
1779 c = tnode_get_child_rcu(p, idx++);
1780 if (!c)
1781 continue;
1782
1783 if (IS_LEAF(c)) {
1784 prefetch(p->child[idx]);
1785 return (struct leaf *) c;
1786 }
1787
1788 /* Descend and restart the scan in the new node */
1789 p = (struct tnode *) c;
1790 idx = 0;
1791 }
1792
1793 /* Node empty, walk back up to parent */
1794 c = (struct node *) p;
1795 } while ((p = node_parent_rcu(c)) != NULL);
1796
1797 return NULL; /* Root of trie */
1798 }
1799
1800 static struct leaf *trie_firstleaf(struct trie *t)
1801 {
1802 struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
1803
1804 if (!n)
1805 return NULL;
1806
1807 if (IS_LEAF(n)) /* trie is just a leaf */
1808 return (struct leaf *) n;
1809
1810 return leaf_walk_rcu(n, NULL);
1811 }
1812
1813 static struct leaf *trie_nextleaf(struct leaf *l)
1814 {
1815 struct node *c = (struct node *) l;
1816 struct tnode *p = node_parent_rcu(c);
1817
1818 if (!p)
1819 return NULL; /* trie with just one leaf */
1820
1821 return leaf_walk_rcu(p, c);
1822 }
1823
1824 static struct leaf *trie_leafindex(struct trie *t, int index)
1825 {
1826 struct leaf *l = trie_firstleaf(t);
1827
1828 while (l && index-- > 0)
1829 l = trie_nextleaf(l);
1830
1831 return l;
1832 }
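
/*
 * Typical use of the walkers above (a sketch only; fn_trie_flush()
 * below does the same under RTNL, while pure readers must hold
 * rcu_read_lock as here):
 */
#if 0
static void walk_all_leaves_sketch(struct trie *t)
{
	struct leaf *l;

	rcu_read_lock();
	for (l = trie_firstleaf(t); l; l = trie_nextleaf(l))
		/* visit l->key here */;
	rcu_read_unlock();
}
#endif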
1833
1834
1835 /*
1836 * Caller must hold RTNL.
1837 */
1838 static int fn_trie_flush(struct fib_table *tb)
1839 {
1840 struct trie *t = (struct trie *) tb->tb_data;
1841 struct leaf *l, *ll = NULL;
1842 int found = 0;
1843
1844 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
1845 found += trie_flush_leaf(l);
1846
1847 if (ll && hlist_empty(&ll->list))
1848 trie_leaf_remove(t, ll);
1849 ll = l;
1850 }
1851
1852 if (ll && hlist_empty(&ll->list))
1853 trie_leaf_remove(t, ll);
1854
1855 pr_debug("trie_flush found=%d\n", found);
1856 return found;
1857 }
1858
1859 static void fn_trie_select_default(struct fib_table *tb,
1860 const struct flowi *flp,
1861 struct fib_result *res)
1862 {
1863 struct trie *t = (struct trie *) tb->tb_data;
1864 int order, last_idx;
1865 struct fib_info *fi = NULL;
1866 struct fib_info *last_resort;
1867 struct fib_alias *fa = NULL;
1868 struct list_head *fa_head;
1869 struct leaf *l;
1870
1871 last_idx = -1;
1872 last_resort = NULL;
1873 order = -1;
1874
1875 rcu_read_lock();
1876
1877 l = fib_find_node(t, 0);
1878 if (!l)
1879 goto out;
1880
1881 fa_head = get_fa_head(l, 0);
1882 if (!fa_head)
1883 goto out;
1884
1885 if (list_empty(fa_head))
1886 goto out;
1887
1888 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1889 struct fib_info *next_fi = fa->fa_info;
1890
1891 if (fa->fa_scope != res->scope ||
1892 fa->fa_type != RTN_UNICAST)
1893 continue;
1894
1895 if (next_fi->fib_priority > res->fi->fib_priority)
1896 break;
1897 if (!next_fi->fib_nh[0].nh_gw ||
1898 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1899 continue;
1900 fa->fa_state |= FA_S_ACCESSED;
1901
1902 if (fi == NULL) {
1903 if (next_fi != res->fi)
1904 break;
1905 } else if (!fib_detect_death(fi, order, &last_resort,
1906 &last_idx, tb->tb_default)) {
1907 fib_result_assign(res, fi);
1908 tb->tb_default = order;
1909 goto out;
1910 }
1911 fi = next_fi;
1912 order++;
1913 }
1914 if (order <= 0 || fi == NULL) {
1915 tb->tb_default = -1;
1916 goto out;
1917 }
1918
1919 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1920 tb->tb_default)) {
1921 fib_result_assign(res, fi);
1922 tb->tb_default = order;
1923 goto out;
1924 }
1925 if (last_idx >= 0)
1926 fib_result_assign(res, last_resort);
1927 tb->tb_default = last_idx;
1928 out:
1929 rcu_read_unlock();
1930 }
1931
1932 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1933 struct fib_table *tb,
1934 struct sk_buff *skb, struct netlink_callback *cb)
1935 {
1936 int i, s_i;
1937 struct fib_alias *fa;
1938 __be32 xkey = htonl(key);
1939
1940 s_i = cb->args[5];
1941 i = 0;
1942
1943 /* rcu_read_lock is held by the caller */
1944
1945 list_for_each_entry_rcu(fa, fah, fa_list) {
1946 if (i < s_i) {
1947 i++;
1948 continue;
1949 }
1950
1951 if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
1952 cb->nlh->nlmsg_seq,
1953 RTM_NEWROUTE,
1954 tb->tb_id,
1955 fa->fa_type,
1956 fa->fa_scope,
1957 xkey,
1958 plen,
1959 fa->fa_tos,
1960 fa->fa_info, NLM_F_MULTI) < 0) {
1961 cb->args[5] = i;
1962 return -1;
1963 }
1964 i++;
1965 }
1966 cb->args[5] = i;
1967 return skb->len;
1968 }
1969
1970 static int fn_trie_dump_leaf(struct leaf *l, struct fib_table *tb,
1971 struct sk_buff *skb, struct netlink_callback *cb)
1972 {
1973 struct leaf_info *li;
1974 struct hlist_node *node;
1975 int i, s_i;
1976
1977 s_i = cb->args[4];
1978 i = 0;
1979
1980 /* rcu_read_lock is held by the caller */
1981 hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
1982 if (i < s_i) {
1983 i++;
1984 continue;
1985 }
1986
1987 if (i > s_i)
1988 cb->args[5] = 0;
1989
1990 if (list_empty(&li->falh))
1991 continue;
1992
1993 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
1994 cb->args[4] = i;
1995 return -1;
1996 }
1997 i++;
1998 }
1999
2000 cb->args[4] = i;
2001 return skb->len;
2002 }
2003
2004 static int fn_trie_dump(struct fib_table *tb, struct sk_buff *skb,
2005 struct netlink_callback *cb)
2006 {
2007 struct leaf *l;
2008 struct trie *t = (struct trie *) tb->tb_data;
2009 t_key key = cb->args[2];
2010 int count = cb->args[3];
2011
2012 rcu_read_lock();
2013 /* Dump starting at last key.
2014 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
2015 */
2016 if (count == 0)
2017 l = trie_firstleaf(t);
2018 else {
2019 /* Normally, continue from last key, but if that is missing
2020 * fall back to a slow rescan.
2021 */
2022 l = fib_find_node(t, key);
2023 if (!l)
2024 l = trie_leafindex(t, count);
2025 }
2026
2027 while (l) {
2028 cb->args[2] = l->key;
2029 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
2030 cb->args[3] = count;
2031 rcu_read_unlock();
2032 return -1;
2033 }
2034
2035 ++count;
2036 l = trie_nextleaf(l);
2037 memset(&cb->args[4], 0,
2038 sizeof(cb->args) - 4*sizeof(cb->args[0]));
2039 }
2040 cb->args[3] = count;
2041 rcu_read_unlock();
2042
2043 return skb->len;
2044 }
2045
2046 void __init fib_hash_init(void)
2047 {
2048 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
2049 sizeof(struct fib_alias),
2050 0, SLAB_PANIC, NULL);
2051
2052 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
2053 max(sizeof(struct leaf),
2054 sizeof(struct leaf_info)),
2055 0, SLAB_PANIC, NULL);
2056 }
2057
2058
2059 /* Fix more generic FIB names for init later */
2060 struct fib_table *fib_hash_table(u32 id)
2061 {
2062 struct fib_table *tb;
2063 struct trie *t;
2064
2065 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
2066 GFP_KERNEL);
2067 if (tb == NULL)
2068 return NULL;
2069
2070 tb->tb_id = id;
2071 tb->tb_default = -1;
2072 tb->tb_lookup = fn_trie_lookup;
2073 tb->tb_insert = fn_trie_insert;
2074 tb->tb_delete = fn_trie_delete;
2075 tb->tb_flush = fn_trie_flush;
2076 tb->tb_select_default = fn_trie_select_default;
2077 tb->tb_dump = fn_trie_dump;
2078
2079 t = (struct trie *) tb->tb_data;
2080 memset(t, 0, sizeof(*t));
2081
2082 if (id == RT_TABLE_LOCAL)
2083 pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
2084
2085 return tb;
2086 }
2087
2088 #ifdef CONFIG_PROC_FS
2089 /* Depth first Trie walk iterator */
2090 struct fib_trie_iter {
2091 struct seq_net_private p;
2092 struct fib_table *tb;
2093 struct tnode *tnode;
2094 unsigned index;
2095 unsigned depth;
2096 };
2097
static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
{
	struct tnode *tn = iter->tnode;
	unsigned cindex = iter->index;
	struct tnode *p;

	/* A single entry routing table */
	if (!tn)
		return NULL;

	pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
		 iter->tnode, iter->index, iter->depth);
rescan:
	while (cindex < (1<<tn->bits)) {
		struct node *n = tnode_get_child_rcu(tn, cindex);

		if (n) {
			if (IS_LEAF(n)) {
				iter->tnode = tn;
				iter->index = cindex + 1;
			} else {
				/* push down one level */
				iter->tnode = (struct tnode *) n;
				iter->index = 0;
				++iter->depth;
			}
			return n;
		}

		++cindex;
	}

	/* Current node exhausted, pop back up */
	p = node_parent_rcu((struct node *)tn);
	if (p) {
		cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
		tn = p;
		--iter->depth;
		goto rescan;
	}

	/* got root? */
	return NULL;
}

static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
				       struct trie *t)
{
	struct node *n;

	if (!t)
		return NULL;

	n = rcu_dereference(t->trie);
	if (!n)
		return NULL;

	if (IS_TNODE(n)) {
		iter->tnode = (struct tnode *) n;
		iter->index = 0;
		iter->depth = 1;
	} else {
		iter->tnode = NULL;
		iter->index = 0;
		iter->depth = 0;
	}

	return n;
}

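/*
 * Gather the numbers reported by /proc/net/fib_triestats in a single
 * depth-first pass, under rcu_read_lock() so the walk is safe against
 * concurrent updates to the trie.
 */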
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
	struct node *n;
	struct fib_trie_iter iter;

	memset(s, 0, sizeof(*s));

	rcu_read_lock();
	for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
		if (IS_LEAF(n)) {
			struct leaf *l = (struct leaf *)n;
			struct leaf_info *li;
			struct hlist_node *tmp;

			s->leaves++;
			s->totdepth += iter.depth;
			if (iter.depth > s->maxdepth)
				s->maxdepth = iter.depth;

			hlist_for_each_entry_rcu(li, tmp, &l->list, hlist)
				++s->prefixes;
		} else {
			const struct tnode *tn = (const struct tnode *) n;
			int i;

			s->tnodes++;
			if (tn->bits < MAX_STAT_DEPTH)
				s->nodesizes[tn->bits]++;

			for (i = 0; i < (1<<tn->bits); i++)
				if (!tn->child[i])
					s->nullpointers++;
		}
	}
	rcu_read_unlock();
}

/*
 *	This outputs /proc/net/fib_triestats
 */
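/*
 * Illustrative output (the numbers are made up; the "1: 9  2: 2" line
 * is a histogram of internal node sizes in bits):
 *
 *	Aver depth:     2.65
 *	Max depth:      5
 *	Leaves:         35
 *	Prefixes:       37
 *	Internal nodes: 11
 *	  1: 9  2: 2
 *	Pointers: 26
 * Null ptrs: 3
 * Total size: 1  kB
 */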
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
	unsigned i, max, pointers, bytes, avdepth;

	if (stat->leaves)
		avdepth = stat->totdepth*100 / stat->leaves;
	else
		avdepth = 0;

	seq_printf(seq, "\tAver depth:     %u.%02d\n",
		   avdepth / 100, avdepth % 100);
	seq_printf(seq, "\tMax depth:      %u\n", stat->maxdepth);

	seq_printf(seq, "\tLeaves:         %u\n", stat->leaves);
	bytes = sizeof(struct leaf) * stat->leaves;

	seq_printf(seq, "\tPrefixes:       %u\n", stat->prefixes);
	bytes += sizeof(struct leaf_info) * stat->prefixes;

	seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
	bytes += sizeof(struct tnode) * stat->tnodes;

	max = MAX_STAT_DEPTH;
	while (max > 0 && stat->nodesizes[max-1] == 0)
		max--;

	pointers = 0;
	/* nodesizes[] has MAX_STAT_DEPTH entries, so "i <= max" would
	 * read one slot past the end when max == MAX_STAT_DEPTH */
	for (i = 1; i < max; i++)
		if (stat->nodesizes[i] != 0) {
			seq_printf(seq, "  %u: %u", i, stat->nodesizes[i]);
			pointers += (1<<i) * stat->nodesizes[i];
		}
	seq_putc(seq, '\n');
	seq_printf(seq, "\tPointers: %u\n", pointers);

	bytes += sizeof(struct node *) * pointers;
	seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
	seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
}

#ifdef CONFIG_IP_FIB_TRIE_STATS
static void trie_show_usage(struct seq_file *seq,
			    const struct trie_use_stats *stats)
{
	seq_printf(seq, "\nCounters:\n---------\n");
	seq_printf(seq, "gets = %u\n", stats->gets);
	seq_printf(seq, "backtracks = %u\n", stats->backtrack);
	seq_printf(seq, "semantic match passed = %u\n",
		   stats->semantic_match_passed);
	seq_printf(seq, "semantic match miss = %u\n",
		   stats->semantic_match_miss);
	seq_printf(seq, "null node hit = %u\n", stats->null_node_hit);
	seq_printf(seq, "skipped node resize = %u\n\n",
		   stats->resize_node_skipped);
}
#endif /* CONFIG_IP_FIB_TRIE_STATS */

static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
{
	if (tb->tb_id == RT_TABLE_LOCAL)
		seq_puts(seq, "Local:\n");
	else if (tb->tb_id == RT_TABLE_MAIN)
		seq_puts(seq, "Main:\n");
	else
		seq_printf(seq, "Id %d:\n", tb->tb_id);
}


static int fib_triestat_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	unsigned int h;

	seq_printf(seq,
		   "Basic info: size of leaf:"
		   " %Zd bytes, size of tnode: %Zd bytes.\n",
		   sizeof(struct leaf), sizeof(struct tnode));

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct hlist_node *node;
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
			struct trie *t = (struct trie *) tb->tb_data;
			struct trie_stat stat;

			if (!t)
				continue;

			fib_table_print(seq, tb);

			trie_collect_stats(t, &stat);
			trie_show_stats(seq, &stat);
#ifdef CONFIG_IP_FIB_TRIE_STATS
			trie_show_usage(seq, &t->stats);
#endif
		}
	}

	return 0;
}

static int fib_triestat_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, fib_triestat_seq_show);
}

static const struct file_operations fib_triestat_fops = {
	.owner   = THIS_MODULE,
	.open    = fib_triestat_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release_net,
};

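/*
 * Linear seek: find the pos'th node (tnodes and leaves both count)
 * across all tables, walking the hash buckets in order.  Used by
 * fib_trie_seq_start() to honour an arbitrary *pos.
 */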
static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	loff_t idx = 0;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		struct hlist_node *node;
		struct fib_table *tb;

		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
			struct node *n;

			for (n = fib_trie_get_first(iter,
						    (struct trie *) tb->tb_data);
			     n; n = fib_trie_get_next(iter))
				if (pos == idx++) {
					iter->tb = tb;
					return n;
				}
		}
	}

	return NULL;
}

static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return fib_trie_get_idx(seq, *pos);
}

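/*
 * Three-stage advance: first the next node within the current trie,
 * then the remaining tables on the same hash chain, and finally the
 * first node of each later hash chain.
 */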
static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_trie_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct fib_table *tb = iter->tb;
	struct hlist_node *tb_node;
	unsigned int h;
	struct node *n;

	++*pos;
	/* next node in same table */
	n = fib_trie_get_next(iter);
	if (n)
		return n;

	/* walk rest of this hash chain */
	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
	while ((tb_node = rcu_dereference(tb->tb_hlist.next))) {
		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
		if (n)
			goto found;
	}

	/* new hash chain */
	while (++h < FIB_TABLE_HASHSZ) {
		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry_rcu(tb, tb_node, head, tb_hlist) {
			n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
			if (n)
				goto found;
		}
	}
	return NULL;

found:
	iter->tb = tb;
	return n;
}

static void fib_trie_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void seq_indent(struct seq_file *seq, int n)
{
	while (n-- > 0)
		seq_puts(seq, "   ");
}

static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
{
	switch (s) {
	case RT_SCOPE_UNIVERSE: return "universe";
	case RT_SCOPE_SITE:	return "site";
	case RT_SCOPE_LINK:	return "link";
	case RT_SCOPE_HOST:	return "host";
	case RT_SCOPE_NOWHERE:	return "nowhere";
	default:
		snprintf(buf, len, "scope=%d", s);
		return buf;
	}
}

static const char *const rtn_type_names[__RTN_MAX] = {
	[RTN_UNSPEC] = "UNSPEC",
	[RTN_UNICAST] = "UNICAST",
	[RTN_LOCAL] = "LOCAL",
	[RTN_BROADCAST] = "BROADCAST",
	[RTN_ANYCAST] = "ANYCAST",
	[RTN_MULTICAST] = "MULTICAST",
	[RTN_BLACKHOLE] = "BLACKHOLE",
	[RTN_UNREACHABLE] = "UNREACHABLE",
	[RTN_PROHIBIT] = "PROHIBIT",
	[RTN_THROW] = "THROW",
	[RTN_NAT] = "NAT",
	[RTN_XRESOLVE] = "XRESOLVE",
};

static inline const char *rtn_type(char *buf, size_t len, unsigned t)
{
	if (t < __RTN_MAX && rtn_type_names[t])
		return rtn_type_names[t];
	snprintf(buf, len, "type %u", t);
	return buf;
}

/* Pretty print the trie */
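/*
 * Roughly (addresses and counters are only illustrative):
 *
 *   +-- 0.0.0.0/0 3 0 5
 *      |-- 0.0.0.0
 *         /0 universe UNICAST
 *
 * Internal nodes show prefix/pos plus bits, full_children and
 * empty_children; leaves show the key and one line per fib_alias.
 */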
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
	const struct fib_trie_iter *iter = seq->private;
	struct node *n = v;

	if (!node_parent_rcu(n))
		fib_table_print(seq, iter->tb);

	if (IS_TNODE(n)) {
		struct tnode *tn = (struct tnode *) n;
		__be32 prf = htonl(mask_pfx(tn->key, tn->pos));

		seq_indent(seq, iter->depth-1);
		seq_printf(seq, "  +-- %pI4/%d %d %d %d\n",
			   &prf, tn->pos, tn->bits, tn->full_children,
			   tn->empty_children);

	} else {
		struct leaf *l = (struct leaf *) n;
		struct leaf_info *li;
		struct hlist_node *node;
		__be32 val = htonl(l->key);

		seq_indent(seq, iter->depth);
		seq_printf(seq, "  |-- %pI4\n", &val);

		hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
			struct fib_alias *fa;

			list_for_each_entry_rcu(fa, &li->falh, fa_list) {
				char buf1[32], buf2[32];

				seq_indent(seq, iter->depth+1);
				seq_printf(seq, "  /%d %s %s", li->plen,
					   rtn_scope(buf1, sizeof(buf1),
						     fa->fa_scope),
					   rtn_type(buf2, sizeof(buf2),
						    fa->fa_type));
				if (fa->fa_tos)
					seq_printf(seq, " tos=%d", fa->fa_tos);
				seq_putc(seq, '\n');
			}
		}
	}

	return 0;
}

static const struct seq_operations fib_trie_seq_ops = {
	.start = fib_trie_seq_start,
	.next  = fib_trie_seq_next,
	.stop  = fib_trie_seq_stop,
	.show  = fib_trie_seq_show,
};

static int fib_trie_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_trie_seq_ops,
			    sizeof(struct fib_trie_iter));
}

static const struct file_operations fib_trie_fops = {
	.owner   = THIS_MODULE,
	.open    = fib_trie_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

struct fib_route_iter {
	struct seq_net_private p;
	struct trie *main_trie;
	loff_t pos;
	t_key key;
};

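/*
 * Seek to the pos'th leaf of the main trie.  For a sequential reader
 * the (iter->pos, iter->key) pair cached by the seq_file callbacks
 * lets us restart from the last leaf instead of rescanning from the
 * first one.
 */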
static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
{
	struct leaf *l = NULL;
	struct trie *t = iter->main_trie;

	/* use cache location of last found key */
	if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
		pos -= iter->pos;
	else {
		iter->pos = 0;
		l = trie_firstleaf(t);
	}

	while (l && pos-- > 0) {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;	/* remember it; "pos" is an exhausted counter here, not a key */
	else
		iter->pos = 0;		/* forget it */

	return l;
}

static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct fib_route_iter *iter = seq->private;
	struct fib_table *tb;

	rcu_read_lock();
	tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
	if (!tb)
		return NULL;

	iter->main_trie = (struct trie *) tb->tb_data;
	if (*pos == 0)
		return SEQ_START_TOKEN;
	else
		return fib_route_get_idx(iter, *pos - 1);
}

static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct fib_route_iter *iter = seq->private;
	struct leaf *l = v;

	++*pos;
	if (v == SEQ_START_TOKEN) {
		iter->pos = 0;
		l = trie_firstleaf(iter->main_trie);
	} else {
		iter->pos++;
		l = trie_nextleaf(l);
	}

	if (l)
		iter->key = l->key;
	else
		iter->pos = 0;
	return l;
}

static void fib_route_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

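/*
 * Map a fib_alias onto the legacy RTF_* flag word shown in
 * /proc/net/route: reject routes (RTN_UNREACHABLE, RTN_PROHIBIT) get
 * RTF_REJECT, routes with a gateway RTF_GATEWAY, /32 entries RTF_HOST,
 * and everything dumped here is RTF_UP.
 */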
static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
	static unsigned type2flags[RTN_MAX + 1] = {
		[RTN_UNREACHABLE] = RTF_REJECT,
		[RTN_PROHIBIT] = RTF_REJECT,
	};
	unsigned flags = type2flags[type];

	if (fi && fi->fib_nh->nh_gw)
		flags |= RTF_GATEWAY;
	if (mask == htonl(0xFFFFFFFF))
		flags |= RTF_HOST;
	flags |= RTF_UP;
	return flags;
}

/*
 *	This outputs /proc/net/route.
 *	The format of the file is not supposed to change, and it must
 *	match the fib_hash output to avoid breaking legacy utilities.
 */
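/*
 * Each row is padded to a fixed 127 characters by the trailing
 * "%*s\n" below.  An illustrative row (fields are tab separated; a
 * default route via 192.168.0.1, flags 0003 = RTF_UP|RTF_GATEWAY):
 *
 *   eth0  00000000  0100A8C0  0003  0  0  0  00000000  0  0  0
 */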
static int fib_route_seq_show(struct seq_file *seq, void *v)
{
	struct leaf *l = v;
	struct leaf_info *li;
	struct hlist_node *node;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
			   "\tWindow\tIRTT");
		return 0;
	}

	hlist_for_each_entry_rcu(li, node, &l->list, hlist) {
		struct fib_alias *fa;
		__be32 mask, prefix;

		mask = inet_make_mask(li->plen);
		prefix = htonl(l->key);

		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
			const struct fib_info *fi = fa->fa_info;
			unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
			int len;

			if (fa->fa_type == RTN_BROADCAST
			    || fa->fa_type == RTN_MULTICAST)
				continue;

			if (fi)
				seq_printf(seq,
					   "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u%n",
					   fi->fib_dev ? fi->fib_dev->name : "*",
					   prefix,
					   fi->fib_nh->nh_gw, flags, 0, 0,
					   fi->fib_priority,
					   mask,
					   (fi->fib_advmss ?
					    fi->fib_advmss + 40 : 0),
					   fi->fib_window,
					   fi->fib_rtt >> 3, &len);
			else
				seq_printf(seq,
					   "*\t%08X\t%08X\t%04X\t%d\t%u\t"
					   "%d\t%08X\t%d\t%u\t%u%n",
					   prefix, 0, flags, 0, 0, 0,
					   mask, 0, 0, 0, &len);

			seq_printf(seq, "%*s\n", 127 - len, "");
		}
	}

	return 0;
}

static const struct seq_operations fib_route_seq_ops = {
	.start = fib_route_seq_start,
	.next  = fib_route_seq_next,
	.stop  = fib_route_seq_stop,
	.show  = fib_route_seq_show,
};

static int fib_route_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &fib_route_seq_ops,
			    sizeof(struct fib_route_iter));
}

static const struct file_operations fib_route_fops = {
	.owner   = THIS_MODULE,
	.open    = fib_route_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

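/*
 * Register the three proc entries for this network namespace; on
 * failure unwind whichever ones were already created and report
 * -ENOMEM.
 */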
int __net_init fib_proc_init(struct net *net)
{
	if (!proc_net_fops_create(net, "fib_trie", S_IRUGO, &fib_trie_fops))
		goto out1;

	if (!proc_net_fops_create(net, "fib_triestat", S_IRUGO,
				  &fib_triestat_fops))
		goto out2;

	if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_route_fops))
		goto out3;

	return 0;

out3:
	proc_net_remove(net, "fib_triestat");
out2:
	proc_net_remove(net, "fib_trie");
out1:
	return -ENOMEM;
}

void __net_exit fib_proc_exit(struct net *net)
{
	proc_net_remove(net, "fib_trie");
	proc_net_remove(net, "fib_triestat");
	proc_net_remove(net, "route");
}

#endif /* CONFIG_PROC_FS */