2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
15 * This work is based on the LPC-trie which is originally described in:
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
26 * Code from fib_hash has been reused which includes the following header:
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
33 * IPv4 FIB: lookup engine and maintenance routines.
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
43 * Substantial contributions to this work come from:
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
51 #define VERSION "0.409"
53 #include <asm/uaccess.h>
54 #include <linux/bitops.h>
55 #include <linux/types.h>
56 #include <linux/kernel.h>
58 #include <linux/string.h>
59 #include <linux/socket.h>
60 #include <linux/sockios.h>
61 #include <linux/errno.h>
63 #include <linux/inet.h>
64 #include <linux/inetdevice.h>
65 #include <linux/netdevice.h>
66 #include <linux/if_arp.h>
67 #include <linux/proc_fs.h>
68 #include <linux/rcupdate.h>
69 #include <linux/skbuff.h>
70 #include <linux/netlink.h>
71 #include <linux/init.h>
72 #include <linux/list.h>
73 #include <linux/slab.h>
74 #include <linux/export.h>
75 #include <net/net_namespace.h>
77 #include <net/protocol.h>
78 #include <net/route.h>
81 #include <net/ip_fib.h>
82 #include "fib_lookup.h"
84 #define MAX_STAT_DEPTH 32
86 #define KEYLENGTH (8*sizeof(t_key))
88 typedef unsigned int t_key;
90 #define IS_TNODE(n) ((n)->bits)
91 #define IS_LEAF(n) (!(n)->bits)
93 #define get_index(_key, _kv) (((_key) ^ (_kv)->key) >> (_kv)->pos)
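/* Worked example: for a tnode with key 0xC0000000 and pos 28, looking up
 * key 0xD0000000 gives get_index == (0xD0000000 ^ 0xC0000000) >> 28 == 1,
 * i.e. child slot 1. Had the keys differed in any bit above the child
 * index field, the result would exceed (1 << bits) - 1, which is how the
 * lookup code below detects a mismatch in skipped bits.
 */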
97 unsigned char bits; /* log2(KEYLENGTH) bits needed */
98 unsigned char pos; /* log2(KEYLENGTH) bits needed */
99 struct tnode __rcu *parent;
102 /* The fields in this struct are valid if bits > 0 (TNODE) */
104 unsigned int full_children; /* KEYLENGTH bits needed */
105 unsigned int empty_children; /* KEYLENGTH bits needed */
106 struct tnode __rcu *child[0];
108 /* This list pointer is valid if bits == 0 (LEAF) */
109 struct hlist_head list;
114 struct hlist_node hlist;
116 u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
117 struct list_head falh;
121 #ifdef CONFIG_IP_FIB_TRIE_STATS
122 struct trie_use_stats {
124 unsigned int backtrack;
125 unsigned int semantic_match_passed;
126 unsigned int semantic_match_miss;
127 unsigned int null_node_hit;
128 unsigned int resize_node_skipped;
133 unsigned int totdepth;
134 unsigned int maxdepth;
137 unsigned int nullpointers;
138 unsigned int prefixes;
139 unsigned int nodesizes[MAX_STAT_DEPTH];
143 struct tnode __rcu *trie;
144 #ifdef CONFIG_IP_FIB_TRIE_STATS
145 struct trie_use_stats __percpu *stats;
149 static void tnode_put_child_reorg(struct tnode *tn, unsigned long i,
150 struct tnode *n, int wasfull);
151 static struct tnode *resize(struct trie *t, struct tnode *tn);
152 static struct tnode *inflate(struct trie *t, struct tnode *tn);
153 static struct tnode *halve(struct trie *t, struct tnode *tn);
154 /* tnodes to free after resize(); protected by RTNL */
155 static struct callback_head *tnode_free_head;
156 static size_t tnode_free_size;
159 * synchronize_rcu after call_rcu for that many pages; it should be especially
160 * useful before resizing the root node with PREEMPT_NONE configs; the value was
161 * obtained experimentally, aiming to avoid visible slowdown.
163 static const int sync_pages = 128;
165 static struct kmem_cache *fn_alias_kmem __read_mostly;
166 static struct kmem_cache *trie_leaf_kmem __read_mostly;
168 /* caller must hold RTNL */
169 #define node_parent(n) rtnl_dereference((n)->parent)
171 /* caller must hold RCU read lock or RTNL */
172 #define node_parent_rcu(n) rcu_dereference_rtnl((n)->parent)
174 /* wrapper for rcu_assign_pointer */
175 static inline void node_set_parent(struct tnode *n, struct tnode *tp)
178 rcu_assign_pointer(n->parent, tp);
181 #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER((n)->parent, p)
183 /* This provides us with the number of children in this node; in the case
184 * of a leaf it will return 0, meaning none of the children are accessible.
186 static inline unsigned long tnode_child_length(const struct tnode *tn)
188 return (1ul << tn->bits) & ~(1ul);
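/* Example: for a leaf (bits == 0) this evaluates to (1 & ~1ul) == 0, while
 * for an internal node with bits == 4 it evaluates to 16; the "& ~(1ul)"
 * masks away the degenerate 1 of the leaf case without a branch.
 */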
191 /* caller must hold RTNL */
192 static inline struct tnode *tnode_get_child(const struct tnode *tn,
195 BUG_ON(i >= tnode_child_length(tn));
197 return rtnl_dereference(tn->child[i]);
200 /* caller must hold RCU read lock or RTNL */
201 static inline struct tnode *tnode_get_child_rcu(const struct tnode *tn,
204 BUG_ON(i >= tnode_child_length(tn));
206 return rcu_dereference_rtnl(tn->child[i]);
209 /* To understand this stuff, an understanding of keys and all their bits is
210 * necessary. Every node in the trie has a key associated with it, but not
211 * all of the bits in that key are significant.
213 * Consider a node 'n' and its parent 'tp'.
215 * If n is a leaf, every bit in its key is significant. Its presence is
216 * necessitated by path compression, since during a tree traversal (when
217 * searching for a leaf - unless we are doing an insertion) we will completely
218 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
219 * a potentially successful search, that we have indeed been walking the
222 * Note that we can never "miss" the correct key in the tree if present by
223 * following the wrong path. Path compression ensures that segments of the key
224 * that are the same for all keys with a given prefix are skipped, but the
225 * skipped part *is* identical for each node in the subtrie below the skipped
226 * bit! trie_insert() in this implementation takes care of that.
228 * If n is an internal node - a 'tnode' here - the various parts of its key
229 * have many different meanings.
232 * _________________________________________________________________
233 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
234 * -----------------------------------------------------------------
235 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
237 * _________________________________________________________________
238 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
239 * -----------------------------------------------------------------
240 * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
247 * First, let's just ignore the bits that come before the parent tp, that is
248 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
249 * point we do not use them for anything.
251 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
252 * index into the parent's child array. That is, they will be used to find
253 * 'n' among tp's children.
255 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
258 * All the bits we have seen so far are significant to the node n. The rest
259 * of the bits are really not needed or indeed known in n->key.
261 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
262 * n's child array, and will of course be different for each child.
264 * The rest of the bits, from 0 to (n->pos - 1), are completely unknown
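*
* In the diagram above this corresponds to tp->pos == 22 with tp->bits == 3
* ("N" in bits 24-22), n->pos == 13 with n->bits == 4 ("C" in bits 16-13),
* the skipped bits "S" in bits 21-17, and the unknown bits "u" in 12-0.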
268 static const int halve_threshold = 25;
269 static const int inflate_threshold = 50;
270 static const int halve_threshold_root = 15;
271 static const int inflate_threshold_root = 30;
273 static void __alias_free_mem(struct rcu_head *head)
275 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
276 kmem_cache_free(fn_alias_kmem, fa);
279 static inline void alias_free_mem_rcu(struct fib_alias *fa)
281 call_rcu(&fa->rcu, __alias_free_mem);
284 #define TNODE_KMALLOC_MAX \
285 ilog2((PAGE_SIZE - sizeof(struct tnode)) / sizeof(struct tnode *))
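/* Rough sizing, assuming 4 KiB pages, 8-byte pointers, and a struct tnode
 * of about 40 bytes: (4096 - 40) / 8 == 507 and ilog2(507) == 8, so tnodes
 * with bits <= 8 fit in a single page and take the kmalloc/kfree path,
 * while larger ones go through vzalloc()/vfree().
 */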
287 static void __node_free_rcu(struct rcu_head *head)
289 struct tnode *n = container_of(head, struct tnode, rcu);
292 kmem_cache_free(trie_leaf_kmem, n);
293 else if (n->bits <= TNODE_KMALLOC_MAX)
299 #define node_free(n) call_rcu(&n->rcu, __node_free_rcu)
301 static inline void free_leaf_info(struct leaf_info *leaf)
303 kfree_rcu(leaf, rcu);
306 static struct tnode *tnode_alloc(size_t size)
308 if (size <= PAGE_SIZE)
309 return kzalloc(size, GFP_KERNEL);
311 return vzalloc(size);
314 static void tnode_free_safe(struct tnode *tn)
317 tn->rcu.next = tnode_free_head;
318 tnode_free_head = &tn->rcu;
321 static void tnode_free_flush(void)
323 struct callback_head *head;
325 while ((head = tnode_free_head)) {
326 struct tnode *tn = container_of(head, struct tnode, rcu);
328 tnode_free_head = head->next;
329 tnode_free_size += offsetof(struct tnode, child[1 << tn->bits]);
334 if (tnode_free_size >= PAGE_SIZE * sync_pages) {
340 static struct tnode *leaf_new(t_key key)
342 struct tnode *l = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
345 /* set key and pos to reflect full key value
346 * any trailing zeros in the key should be ignored
347 * as the nodes are searched
351 /* set bits to 0 indicating we are not a tnode */
354 INIT_HLIST_HEAD(&l->list);
359 static struct leaf_info *leaf_info_new(int plen)
361 struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
364 li->mask_plen = ntohl(inet_make_mask(plen));
365 INIT_LIST_HEAD(&li->falh);
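	/* e.g. plen == 24 yields mask_plen == 0xffffff00, so the lookup's
	 * "(key ^ n->key) & li->mask_plen" compares exactly the top 24 bits.
	 */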
370 static struct tnode *tnode_new(t_key key, int pos, int bits)
372 size_t sz = offsetof(struct tnode, child[1 << bits]);
373 struct tnode *tn = tnode_alloc(sz);
374 unsigned int shift = pos + bits;
376 /* verify that bits and pos have their msb bits clear and that the values are valid */
377 BUG_ON(!bits || (shift > KEYLENGTH));
383 tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
384 tn->full_children = 0;
385 tn->empty_children = 1<<bits;
388 pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
389 sizeof(struct tnode *) << bits);
393 /* Check whether a tnode 'n' is "full", i.e. it is an internal node
394 * and no bits are skipped. See discussion in dyntree paper p. 6
396 static inline int tnode_full(const struct tnode *tn, const struct tnode *n)
398 return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
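/* e.g. a child whose (pos + bits) lands exactly on tn->pos has no skipped
 * bits between itself and tn; a leaf is never "full" since IS_TNODE() is
 * false for it.
 */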
401 static inline void put_child(struct tnode *tn, unsigned long i,
404 tnode_put_child_reorg(tn, i, n, -1);
408 * Add a child at position i overwriting the old value.
409 * Update the value of full_children and empty_children.
412 static void tnode_put_child_reorg(struct tnode *tn, unsigned long i,
413 struct tnode *n, int wasfull)
415 struct tnode *chi = rtnl_dereference(tn->child[i]);
418 BUG_ON(i >= tnode_child_length(tn));
420 /* update empty_children */
421 if (n == NULL && chi != NULL)
422 tn->empty_children++;
423 else if (n != NULL && chi == NULL)
424 tn->empty_children--;
426 /* update full_children */
428 wasfull = tnode_full(tn, chi);
430 isfull = tnode_full(tn, n);
431 if (wasfull && !isfull)
433 else if (!wasfull && isfull)
436 node_set_parent(n, tn);
438 rcu_assign_pointer(tn->child[i], n);
441 static void put_child_root(struct tnode *tp, struct trie *t,
442 t_key key, struct tnode *n)
445 put_child(tp, get_index(key, tp), n);
447 rcu_assign_pointer(t->trie, n);
451 static struct tnode *resize(struct trie *t, struct tnode *tn)
453 struct tnode *old_tn, *n = NULL;
454 int inflate_threshold_use;
455 int halve_threshold_use;
461 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
462 tn, inflate_threshold, halve_threshold);
465 if (tn->empty_children > (tnode_child_length(tn) - 1))
469 if (tn->empty_children == (tnode_child_length(tn) - 1))
472 * Double as long as the resulting node has a number of
473 * nonempty children that is above the threshold.
477 * From "Implementing a dynamic compressed trie" by Stefan Nilsson of
478 * the Helsinki University of Technology and Matti Tikkanen of Nokia
479 * Telecommunications, page 6:
480 * "A node is doubled if the ratio of non-empty children to all
481 * children in the *doubled* node is at least 'high'."
483 * 'high' in this instance is the variable 'inflate_threshold'. It
484 * is expressed as a percentage, so we multiply it with
485 * tnode_child_length() and instead of multiplying by 2 (since the
486 * child array will be doubled by inflate()) and multiplying
487 * the left-hand side by 100 (to handle the percentage thing) we
488 * multiply the left-hand side by 50.
490 * The left-hand side may look a bit weird: tnode_child_length(tn)
491 * - tn->empty_children is of course the number of non-null children
492 * in the current node. tn->full_children is the number of "full"
493 * children, that is non-null tnodes with a skip value of 0.
494 * All of those will be doubled in the resulting inflated tnode, so
495 * we just count them one extra time here.
497 * A clearer way to write this would be:
499 * to_be_doubled = tn->full_children;
500 * not_to_be_doubled = tnode_child_length(tn) - tn->empty_children -
503 * new_child_length = tnode_child_length(tn) * 2;
505 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
507 * if (new_fill_factor >= inflate_threshold)
509 * ...and so on, though it would mess up the while () loop.
512 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
516 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
517 * inflate_threshold * new_child_length
519 * expand not_to_be_doubled and to_be_doubled, and shorten:
520 * 100 * (tnode_child_length(tn) - tn->empty_children +
521 * tn->full_children) >= inflate_threshold * new_child_length
523 * expand new_child_length:
524 * 100 * (tnode_child_length(tn) - tn->empty_children +
525 * tn->full_children) >=
526 * inflate_threshold * tnode_child_length(tn) * 2
529 * 50 * (tn->full_children + tnode_child_length(tn) -
530 * tn->empty_children) >= inflate_threshold *
531 * tnode_child_length(tn)
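*
* A worked instance: with tnode_child_length(tn) == 16, empty_children == 4,
* and full_children == 6, the left-hand side is 50 * (6 + 16 - 4) == 900 and
* the right-hand side is 50 * 16 == 800 (for inflate_threshold == 50), so
* the node gets doubled.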
535 /* Keep root node larger */
537 if (!node_parent(tn)) {
538 inflate_threshold_use = inflate_threshold_root;
539 halve_threshold_use = halve_threshold_root;
541 inflate_threshold_use = inflate_threshold;
542 halve_threshold_use = halve_threshold;
546 while ((tn->full_children > 0 && max_work-- &&
547 50 * (tn->full_children + tnode_child_length(tn)
548 - tn->empty_children)
549 >= inflate_threshold_use * tnode_child_length(tn))) {
556 #ifdef CONFIG_IP_FIB_TRIE_STATS
557 this_cpu_inc(t->stats->resize_node_skipped);
563 /* Return if at least one inflate was run */
564 if (max_work != MAX_WORK)
568 * Halve as long as the number of empty children in this
569 * node is above threshold.
573 while (tn->bits > 1 && max_work-- &&
574 100 * (tnode_child_length(tn) - tn->empty_children) <
575 halve_threshold_use * tnode_child_length(tn)) {
581 #ifdef CONFIG_IP_FIB_TRIE_STATS
582 this_cpu_inc(t->stats->resize_node_skipped);
589 /* Only one child remains */
590 if (tn->empty_children == (tnode_child_length(tn) - 1)) {
593 for (i = tnode_child_length(tn); !n && i;)
594 n = tnode_get_child(tn, --i);
596 /* compress one level */
597 node_set_parent(n, NULL);
605 static void tnode_clean_free(struct tnode *tn)
607 struct tnode *tofree;
610 for (i = 0; i < tnode_child_length(tn); i++) {
611 tofree = tnode_get_child(tn, i);
618 static struct tnode *inflate(struct trie *t, struct tnode *oldtnode)
620 unsigned long olen = tnode_child_length(oldtnode);
625 pr_debug("In inflate\n");
627 tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
630 return ERR_PTR(-ENOMEM);
633 * Preallocate and store tnodes before the actual work so we
634 * don't get into an inconsistent state if memory allocation
635 * fails. In case of failure we return the oldnode and the
636 * inflate of the tnode is abandoned.
638 for (i = 0, m = 1u << tn->pos; i < olen; i++) {
639 struct tnode *inode = tnode_get_child(oldtnode, i);
641 if (tnode_full(oldtnode, inode) && (inode->bits > 1)) {
642 struct tnode *left, *right;
644 left = tnode_new(inode->key & ~m, inode->pos,
649 right = tnode_new(inode->key | m, inode->pos,
657 put_child(tn, 2*i, left);
658 put_child(tn, 2*i+1, right);
662 for (i = 0; i < olen; i++) {
663 struct tnode *inode = tnode_get_child(oldtnode, i);
664 struct tnode *left, *right;
665 unsigned long size, j;
671 /* A leaf or an internal node with skipped bits */
672 if (!tnode_full(oldtnode, inode)) {
673 put_child(tn, get_index(inode->key, tn), inode);
677 /* An internal node with two children */
678 if (inode->bits == 1) {
679 put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
680 put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));
682 tnode_free_safe(inode);
686 /* An internal node with more than two children */
688 /* We will replace this node 'inode' with two new
689 * ones, 'left' and 'right', each with half of the
690 * original children. The two new nodes will have
691 * a position one bit further down the key and this
692 * means that the "significant" part of their keys
693 * (see the discussion near the top of this file)
694 * will differ by one bit, which will be "0" in
695 * left's key and "1" in right's key. Since we are
696 * moving the key position by one step, the bit that
697 * we are moving away from - the bit at position
698 * (inode->pos) - is the one that will differ between
699 * left and right. So... we synthesize that bit in the two new keys.
701 * The mask 'm' below will be a single "one" bit at
702 * the position (inode->pos)
705 /* Use the old key, but set the new significant bit to zero. */
709 left = tnode_get_child(tn, 2*i);
710 put_child(tn, 2*i, NULL);
714 right = tnode_get_child(tn, 2*i+1);
715 put_child(tn, 2*i+1, NULL);
719 size = tnode_child_length(left);
720 for (j = 0; j < size; j++) {
721 put_child(left, j, rtnl_dereference(inode->child[j]));
722 put_child(right, j, rtnl_dereference(inode->child[j + size]));
724 put_child(tn, 2*i, resize(t, left));
725 put_child(tn, 2*i+1, resize(t, right));
727 tnode_free_safe(inode);
729 tnode_free_safe(oldtnode);
732 tnode_clean_free(tn);
733 return ERR_PTR(-ENOMEM);
736 static struct tnode *halve(struct trie *t, struct tnode *oldtnode)
738 unsigned long olen = tnode_child_length(oldtnode);
739 struct tnode *tn, *left, *right;
742 pr_debug("In halve\n");
744 tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
747 return ERR_PTR(-ENOMEM);
750 * Preallocate and store tnodes before the actual work so we
751 * don't get into an inconsistent state if memory allocation
752 * fails. In case of failure we return the oldnode and the
753 * halve of the tnode is abandoned.
756 for (i = 0; i < olen; i += 2) {
757 left = tnode_get_child(oldtnode, i);
758 right = tnode_get_child(oldtnode, i+1);
760 /* Two nonempty children */
764 newn = tnode_new(left->key, oldtnode->pos, 1);
769 put_child(tn, i/2, newn);
774 for (i = 0; i < olen; i += 2) {
775 struct tnode *newBinNode;
777 left = tnode_get_child(oldtnode, i);
778 right = tnode_get_child(oldtnode, i+1);
780 /* At least one of the children is empty */
782 if (right == NULL) /* Both are empty */
784 put_child(tn, i/2, right);
789 put_child(tn, i/2, left);
793 /* Two nonempty children */
794 newBinNode = tnode_get_child(tn, i/2);
795 put_child(tn, i/2, NULL);
796 put_child(newBinNode, 0, left);
797 put_child(newBinNode, 1, right);
798 put_child(tn, i/2, resize(t, newBinNode));
800 tnode_free_safe(oldtnode);
803 tnode_clean_free(tn);
804 return ERR_PTR(-ENOMEM);
807 /* readside must use rcu_read_lock; currently only the dump routines
808 * (via get_fa_head and dump) do so */
810 static struct leaf_info *find_leaf_info(struct tnode *l, int plen)
812 struct hlist_head *head = &l->list;
813 struct leaf_info *li;
815 hlist_for_each_entry_rcu(li, head, hlist)
816 if (li->plen == plen)
822 static inline struct list_head *get_fa_head(struct tnode *l, int plen)
824 struct leaf_info *li = find_leaf_info(l, plen);
832 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new)
834 struct leaf_info *li = NULL, *last = NULL;
836 if (hlist_empty(head)) {
837 hlist_add_head_rcu(&new->hlist, head);
839 hlist_for_each_entry(li, head, hlist) {
840 if (new->plen > li->plen)
846 hlist_add_behind_rcu(&new->hlist, &last->hlist);
848 hlist_add_before_rcu(&new->hlist, &li->hlist);
852 /* rcu_read_lock needs to be held by the caller on the read side */
853 static struct tnode *fib_find_node(struct trie *t, u32 key)
855 struct tnode *n = rcu_dereference_rtnl(t->trie);
858 unsigned long index = get_index(key, n);
860 /* This bit of code is a bit tricky but it combines multiple
861 * checks into a single check. The prefix consists of the
862 * prefix plus zeros for the bits in the cindex. The index
863 * is the difference between the key and this value. From
864 * this we can actually derive several pieces of data.
865 * if !(index >> bits)
866 * we know the value is cindex
868 * we have a mismatch in skip bits and failed
870 if (index >> n->bits)
873 /* we have found a leaf. Prefixes have already been compared */
877 n = rcu_dereference_rtnl(n->child[index]);
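/* The combined check above, restated as a freestanding sketch (a
 * hypothetical helper, not used by this file): it returns the child slot,
 * or ~0ul on a skipped-bits mismatch.
 *
 *	static unsigned long child_slot(t_key key, const struct tnode *n)
 *	{
 *		unsigned long index = (key ^ n->key) >> n->pos;
 *
 *		return (index >> n->bits) ? ~0ul : index;
 *	}
 */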
883 static void trie_rebalance(struct trie *t, struct tnode *tn)
891 while (tn != NULL && (tp = node_parent(tn)) != NULL) {
892 cindex = get_index(key, tp);
893 wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
896 tnode_put_child_reorg(tp, cindex, tn, wasfull);
898 tp = node_parent(tn);
900 rcu_assign_pointer(t->trie, tn);
908 /* Handle last (top) tnode */
912 rcu_assign_pointer(t->trie, tn);
916 /* only used from updater-side */
918 static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
920 struct list_head *fa_head = NULL;
921 struct tnode *l, *n, *tp = NULL;
922 struct leaf_info *li;
924 li = leaf_info_new(plen);
929 n = rtnl_dereference(t->trie);
931 /* If we point to NULL, stop. Either the tree is empty and we should
932 * just put a new leaf in it, or we have reached an empty child slot,
933 * and we should just put our new leaf in that.
935 * If we hit a node with a key that doesn't match then we should stop
936 * and create a new tnode to replace that node and insert ourselves
937 * and the other node into the new tnode.
940 unsigned long index = get_index(key, n);
942 /* This bit of code is a bit tricky but it combines multiple
943 * checks into a single check. The prefix consists of the
944 * prefix plus zeros for the "bits" in the prefix. The index
945 * is the difference between the key and this value. From
946 * this we can actually derive several pieces of data.
947 * if !(index >> bits)
948 * we know the value is child index
950 * we have a mismatch in skip bits and failed
952 if (index >> n->bits)
955 /* we have found a leaf. Prefixes have already been compared */
957 /* Case 1: n is a leaf, and prefixes match */
958 insert_leaf_info(&n->list, li);
963 n = rcu_dereference_rtnl(n->child[index]);
972 insert_leaf_info(&l->list, li);
974 /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
976 * Add a new tnode here
977 * the first tnode needs some special handling; this
978 * leaves us in position for handling as case 3
983 tn = tnode_new(key, __fls(key ^ n->key), 1);
990 /* initialize routes out of node */
991 NODE_INIT_PARENT(tn, tp);
992 put_child(tn, get_index(key, tn) ^ 1, n);
994 /* start adding routes into the node */
995 put_child_root(tp, t, key, tn);
996 node_set_parent(n, tn);
998 /* parent now has a NULL spot where the leaf can go */
1002 /* Case 3: n is NULL, and will just insert a new leaf */
1004 NODE_INIT_PARENT(l, tp);
1005 put_child(tp, get_index(key, tp), l);
1006 trie_rebalance(t, tp);
1008 rcu_assign_pointer(t->trie, l);
1015 * Caller must hold RTNL.
1017 int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1019 struct trie *t = (struct trie *) tb->tb_data;
1020 struct fib_alias *fa, *new_fa;
1021 struct list_head *fa_head = NULL;
1022 struct fib_info *fi;
1023 int plen = cfg->fc_dst_len;
1024 u8 tos = cfg->fc_tos;
1032 key = ntohl(cfg->fc_dst);
1034 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1036 mask = ntohl(inet_make_mask(plen));
1043 fi = fib_create_info(cfg);
1049 l = fib_find_node(t, key);
1053 fa_head = get_fa_head(l, plen);
1054 fa = fib_find_alias(fa_head, tos, fi->fib_priority);
1057 /* Now fa, if non-NULL, points to the first fib alias
1058 * with the same keys [prefix,tos,priority], if such key already
1059 * exists or to the node before which we will insert new one.
1061 * If fa is NULL, we will need to allocate a new one and
1062 * insert to the head of f.
1064 * If f is NULL, no fib node matched the destination key
1065 * and we need to allocate a new one of those as well.
1068 if (fa && fa->fa_tos == tos &&
1069 fa->fa_info->fib_priority == fi->fib_priority) {
1070 struct fib_alias *fa_first, *fa_match;
1073 if (cfg->fc_nlflags & NLM_F_EXCL)
1077 * 1. Find exact match for type, scope, fib_info to avoid
1079 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1083 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1084 list_for_each_entry_continue(fa, fa_head, fa_list) {
1085 if (fa->fa_tos != tos)
1087 if (fa->fa_info->fib_priority != fi->fib_priority)
1089 if (fa->fa_type == cfg->fc_type &&
1090 fa->fa_info == fi) {
1096 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1097 struct fib_info *fi_drop;
1107 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1111 fi_drop = fa->fa_info;
1112 new_fa->fa_tos = fa->fa_tos;
1113 new_fa->fa_info = fi;
1114 new_fa->fa_type = cfg->fc_type;
1115 state = fa->fa_state;
1116 new_fa->fa_state = state & ~FA_S_ACCESSED;
1118 list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1119 alias_free_mem_rcu(fa);
1121 fib_release_info(fi_drop);
1122 if (state & FA_S_ACCESSED)
1123 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1124 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1125 tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
1129 /* Error if we find a perfect match which
1130 * uses the same scope, type, and nexthop
1136 if (!(cfg->fc_nlflags & NLM_F_APPEND))
1140 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1144 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1148 new_fa->fa_info = fi;
1149 new_fa->fa_tos = tos;
1150 new_fa->fa_type = cfg->fc_type;
1151 new_fa->fa_state = 0;
1153 * Insert the new entry into the list.
1157 fa_head = fib_insert_node(t, key, plen);
1158 if (unlikely(!fa_head)) {
1160 goto out_free_new_fa;
1165 tb->tb_num_default++;
1167 list_add_tail_rcu(&new_fa->fa_list,
1168 (fa ? &fa->fa_list : fa_head));
1170 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1171 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id,
1172 &cfg->fc_nlinfo, 0);
1177 kmem_cache_free(fn_alias_kmem, new_fa);
1179 fib_release_info(fi);
1184 static inline t_key prefix_mismatch(t_key key, struct tnode *n)
1186 t_key prefix = n->key;
1188 return (key ^ prefix) & (prefix | -prefix);
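/* Worked example: for n->key == 0x30000000, (prefix | -prefix) ==
 * 0xf0000000, so only bits 31-28 are compared. key == 0x35000000 gives
 * (key ^ prefix) & mask == 0 (no mismatch; the lower bits are child/skip
 * territory), while key == 0x50000000 gives 0x60000000, a mismatch.
 */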
1191 /* should be called with rcu_read_lock */
1192 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
1193 struct fib_result *res, int fib_flags)
1195 struct trie *t = (struct trie *)tb->tb_data;
1196 #ifdef CONFIG_IP_FIB_TRIE_STATS
1197 struct trie_use_stats __percpu *stats = t->stats;
1199 const t_key key = ntohl(flp->daddr);
1200 struct tnode *n, *pn;
1201 struct leaf_info *li;
1204 n = rcu_dereference(t->trie);
1208 #ifdef CONFIG_IP_FIB_TRIE_STATS
1209 this_cpu_inc(stats->gets);
1215 /* Step 1: Travel to the longest prefix match in the trie */
1217 unsigned long index = get_index(key, n);
1219 /* This bit of code is a bit tricky but it combines multiple
1220 * checks into a single check. The prefix consists of the
1221 * prefix plus zeros for the "bits" in the prefix. The index
1222 * is the difference between the key and this value. From
1223 * this we can actually derive several pieces of data.
1224 * if !(index >> bits)
1225 * we know the value is child index
1227 * we have a mismatch in skip bits and failed
1229 if (index >> n->bits)
1232 /* we have found a leaf. Prefixes have already been compared */
1236 /* only record pn and cindex if we are going to be chopping
1237 * bits later. Otherwise we are just wasting cycles.
1244 n = rcu_dereference(n->child[index]);
1249 /* Step 2: Sort out leaves and begin backtracking for longest prefix */
1251 /* record the pointer where our next node pointer is stored */
1252 struct tnode __rcu **cptr = n->child;
1254 /* This test verifies that none of the bits that differ
1255 * between the key and the prefix exist in the region of
1256 * the lsb and higher in the prefix.
1258 if (unlikely(prefix_mismatch(key, n)))
1261 /* exit out and process leaf */
1262 if (unlikely(IS_LEAF(n)))
1265 /* Don't bother recording parent info. Since we are in
1266 * prefix match mode we will have to come back to wherever
1267 * we started this traversal anyway
1270 while ((n = rcu_dereference(*cptr)) == NULL) {
1272 #ifdef CONFIG_IP_FIB_TRIE_STATS
1274 this_cpu_inc(stats->null_node_hit);
1276 /* If we are at cindex 0 there are no more bits for
1277 * us to strip at this level so we must ascend back
1278 * up one level to see if there are any more bits to
1279 * be stripped there.
1282 t_key pkey = pn->key;
1284 pn = node_parent_rcu(pn);
1287 #ifdef CONFIG_IP_FIB_TRIE_STATS
1288 this_cpu_inc(stats->backtrack);
1290 /* Get Child's index */
1291 cindex = get_index(pkey, pn);
1294 /* strip the least significant bit from the cindex */
1295 cindex &= cindex - 1;
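			/* e.g. cindex 0b0110 becomes 0b0100: the classic
			 * clear-lowest-set-bit idiom, stepping to the next
			 * candidate slot to backtrack into.
			 */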
1297 /* grab pointer for next child node */
1298 cptr = &pn->child[cindex];
1303 /* Step 3: Process the leaf, if that fails fall back to backtracking */
1304 hlist_for_each_entry_rcu(li, &n->list, hlist) {
1305 struct fib_alias *fa;
1307 if ((key ^ n->key) & li->mask_plen)
1310 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
1311 struct fib_info *fi = fa->fa_info;
1314 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1318 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1320 fib_alias_accessed(fa);
1321 err = fib_props[fa->fa_type].error;
1322 if (unlikely(err < 0)) {
1323 #ifdef CONFIG_IP_FIB_TRIE_STATS
1324 this_cpu_inc(stats->semantic_match_passed);
1328 if (fi->fib_flags & RTNH_F_DEAD)
1330 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1331 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1333 if (nh->nh_flags & RTNH_F_DEAD)
1335 if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
1338 if (!(fib_flags & FIB_LOOKUP_NOREF))
1339 atomic_inc(&fi->fib_clntref);
1341 res->prefixlen = li->plen;
1342 res->nh_sel = nhsel;
1343 res->type = fa->fa_type;
1344 res->scope = fi->fib_scope;
1347 res->fa_head = &li->falh;
1348 #ifdef CONFIG_IP_FIB_TRIE_STATS
1349 this_cpu_inc(stats->semantic_match_passed);
1355 #ifdef CONFIG_IP_FIB_TRIE_STATS
1356 this_cpu_inc(stats->semantic_match_miss);
1361 EXPORT_SYMBOL_GPL(fib_table_lookup);
1364 * Remove the leaf and return parent.
1366 static void trie_leaf_remove(struct trie *t, struct tnode *l)
1368 struct tnode *tp = node_parent(l);
1370 pr_debug("entering trie_leaf_remove(%p)\n", l);
1373 put_child(tp, get_index(l->key, tp), NULL);
1374 trie_rebalance(t, tp);
1376 RCU_INIT_POINTER(t->trie, NULL);
1383 * Caller must hold RTNL.
1385 int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
1387 struct trie *t = (struct trie *) tb->tb_data;
1389 int plen = cfg->fc_dst_len;
1390 u8 tos = cfg->fc_tos;
1391 struct fib_alias *fa, *fa_to_delete;
1392 struct list_head *fa_head;
1394 struct leaf_info *li;
1399 key = ntohl(cfg->fc_dst);
1400 mask = ntohl(inet_make_mask(plen));
1406 l = fib_find_node(t, key);
1411 li = find_leaf_info(l, plen);
1416 fa_head = &li->falh;
1417 fa = fib_find_alias(fa_head, tos, 0);
1422 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1424 fa_to_delete = NULL;
1425 fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
1426 list_for_each_entry_continue(fa, fa_head, fa_list) {
1427 struct fib_info *fi = fa->fa_info;
1429 if (fa->fa_tos != tos)
1432 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1433 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1434 fa->fa_info->fib_scope == cfg->fc_scope) &&
1435 (!cfg->fc_prefsrc ||
1436 fi->fib_prefsrc == cfg->fc_prefsrc) &&
1437 (!cfg->fc_protocol ||
1438 fi->fib_protocol == cfg->fc_protocol) &&
1439 fib_nh_match(cfg, fi) == 0) {
1449 rtmsg_fib(RTM_DELROUTE, htonl(key), fa, plen, tb->tb_id,
1450 &cfg->fc_nlinfo, 0);
1452 list_del_rcu(&fa->fa_list);
1455 tb->tb_num_default--;
1457 if (list_empty(fa_head)) {
1458 hlist_del_rcu(&li->hlist);
1462 if (hlist_empty(&l->list))
1463 trie_leaf_remove(t, l);
1465 if (fa->fa_state & FA_S_ACCESSED)
1466 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1468 fib_release_info(fa->fa_info);
1469 alias_free_mem_rcu(fa);
1473 static int trie_flush_list(struct list_head *head)
1475 struct fib_alias *fa, *fa_node;
1478 list_for_each_entry_safe(fa, fa_node, head, fa_list) {
1479 struct fib_info *fi = fa->fa_info;
1481 if (fi && (fi->fib_flags & RTNH_F_DEAD)) {
1482 list_del_rcu(&fa->fa_list);
1483 fib_release_info(fa->fa_info);
1484 alias_free_mem_rcu(fa);
1491 static int trie_flush_leaf(struct tnode *l)
1494 struct hlist_head *lih = &l->list;
1495 struct hlist_node *tmp;
1496 struct leaf_info *li = NULL;
1498 hlist_for_each_entry_safe(li, tmp, lih, hlist) {
1499 found += trie_flush_list(&li->falh);
1501 if (list_empty(&li->falh)) {
1502 hlist_del_rcu(&li->hlist);
1510 * Scan for the next right leaf starting at node p->child[idx].
1511 * Since we have back pointers, no recursion is necessary.
1513 static struct tnode *leaf_walk_rcu(struct tnode *p, struct tnode *c)
1516 unsigned long idx = c ? get_index(c->key, p) + 1 : 0;
1518 while (idx < tnode_child_length(p)) {
1519 c = tnode_get_child_rcu(p, idx++);
1526 /* Restart: begin scanning in the new node */
1531 /* Node empty, walk back up to parent */
1533 } while ((p = node_parent_rcu(c)) != NULL);
1535 return NULL; /* Root of trie */
1538 static struct tnode *trie_firstleaf(struct trie *t)
1540 struct tnode *n = rcu_dereference_rtnl(t->trie);
1545 if (IS_LEAF(n)) /* trie is just a leaf */
1548 return leaf_walk_rcu(n, NULL);
1551 static struct tnode *trie_nextleaf(struct tnode *l)
1553 struct tnode *p = node_parent_rcu(l);
1556 return NULL; /* trie with just one leaf */
1558 return leaf_walk_rcu(p, l);
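/* Together, trie_firstleaf() and trie_nextleaf() give the usual in-order
 * walk (RCU read side or RTNL assumed held), e.g.:
 *
 *	for (l = trie_firstleaf(t); l; l = trie_nextleaf(l))
 *		visit(l);
 *
 * where visit() stands in for any per-leaf work; this is exactly the
 * pattern fib_table_flush() uses below.
 */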
1561 static struct tnode *trie_leafindex(struct trie *t, int index)
1563 struct tnode *l = trie_firstleaf(t);
1565 while (l && index-- > 0)
1566 l = trie_nextleaf(l);
1573 * Caller must hold RTNL.
1575 int fib_table_flush(struct fib_table *tb)
1577 struct trie *t = (struct trie *) tb->tb_data;
1578 struct tnode *l, *ll = NULL;
1581 for (l = trie_firstleaf(t); l; l = trie_nextleaf(l)) {
1582 found += trie_flush_leaf(l);
1584 if (ll && hlist_empty(&ll->list))
1585 trie_leaf_remove(t, ll);
1589 if (ll && hlist_empty(&ll->list))
1590 trie_leaf_remove(t, ll);
1592 pr_debug("trie_flush found=%d\n", found);
1596 void fib_free_table(struct fib_table *tb)
1598 #ifdef CONFIG_IP_FIB_TRIE_STATS
1599 struct trie *t = (struct trie *)tb->tb_data;
1601 free_percpu(t->stats);
1602 #endif /* CONFIG_IP_FIB_TRIE_STATS */
1606 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
1607 struct fib_table *tb,
1608 struct sk_buff *skb, struct netlink_callback *cb)
1611 struct fib_alias *fa;
1612 __be32 xkey = htonl(key);
1617 /* rcu_read_lock is held by the caller */
1619 list_for_each_entry_rcu(fa, fah, fa_list) {
1625 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
1633 fa->fa_info, NLM_F_MULTI) < 0) {
1643 static int fn_trie_dump_leaf(struct tnode *l, struct fib_table *tb,
1644 struct sk_buff *skb, struct netlink_callback *cb)
1646 struct leaf_info *li;
1652 /* rcu_read_lock is held by the caller */
1653 hlist_for_each_entry_rcu(li, &l->list, hlist) {
1662 if (list_empty(&li->falh))
1665 if (fn_trie_dump_fa(l->key, li->plen, &li->falh, tb, skb, cb) < 0) {
1676 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
1677 struct netlink_callback *cb)
1680 struct trie *t = (struct trie *) tb->tb_data;
1681 t_key key = cb->args[2];
1682 int count = cb->args[3];
1685 /* Dump starting at last key.
1686 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
1689 l = trie_firstleaf(t);
1691 /* Normally, continue from the last key, but if that is missing
1692 * fall back to a slow rescan
1694 l = fib_find_node(t, key);
1696 l = trie_leafindex(t, count);
1700 cb->args[2] = l->key;
1701 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
1702 cb->args[3] = count;
1708 l = trie_nextleaf(l);
1709 memset(&cb->args[4], 0,
1710 sizeof(cb->args) - 4*sizeof(cb->args[0]));
1712 cb->args[3] = count;
1718 void __init fib_trie_init(void)
1720 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
1721 sizeof(struct fib_alias),
1722 0, SLAB_PANIC, NULL);
1724 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
1725 max(sizeof(struct tnode),
1726 sizeof(struct leaf_info)),
1727 0, SLAB_PANIC, NULL);
1731 struct fib_table *fib_trie_table(u32 id)
1733 struct fib_table *tb;
1736 tb = kmalloc(sizeof(struct fib_table) + sizeof(struct trie),
1742 tb->tb_default = -1;
1743 tb->tb_num_default = 0;
1745 t = (struct trie *) tb->tb_data;
1746 RCU_INIT_POINTER(t->trie, NULL);
1747 #ifdef CONFIG_IP_FIB_TRIE_STATS
1748 t->stats = alloc_percpu(struct trie_use_stats);
1758 #ifdef CONFIG_PROC_FS
1759 /* Depth first Trie walk iterator */
1760 struct fib_trie_iter {
1761 struct seq_net_private p;
1762 struct fib_table *tb;
1763 struct tnode *tnode;
1768 static struct tnode *fib_trie_get_next(struct fib_trie_iter *iter)
1770 unsigned long cindex = iter->index;
1771 struct tnode *tn = iter->tnode;
1774 /* A single entry routing table */
1778 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
1779 iter->tnode, iter->index, iter->depth);
1781 while (cindex < tnode_child_length(tn)) {
1782 struct tnode *n = tnode_get_child_rcu(tn, cindex);
1787 iter->index = cindex + 1;
1789 /* push down one level */
1800 /* Current node exhausted, pop back up */
1801 p = node_parent_rcu(tn);
1803 cindex = get_index(tn->key, p) + 1;
1813 static struct tnode *fib_trie_get_first(struct fib_trie_iter *iter,
1821 n = rcu_dereference(t->trie);
1838 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
1841 struct fib_trie_iter iter;
1843 memset(s, 0, sizeof(*s));
1846 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
1848 struct leaf_info *li;
1851 s->totdepth += iter.depth;
1852 if (iter.depth > s->maxdepth)
1853 s->maxdepth = iter.depth;
1855 hlist_for_each_entry_rcu(li, &n->list, hlist)
1861 if (n->bits < MAX_STAT_DEPTH)
1862 s->nodesizes[n->bits]++;
1864 for (i = 0; i < tnode_child_length(n); i++) {
1865 if (!rcu_access_pointer(n->child[i]))
1874 * This outputs /proc/net/fib_triestat
1876 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
1878 unsigned int i, max, pointers, bytes, avdepth;
1881 avdepth = stat->totdepth*100 / stat->leaves;
1885 seq_printf(seq, "\tAver depth: %u.%02d\n",
1886 avdepth / 100, avdepth % 100);
1887 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
1889 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
1890 bytes = sizeof(struct tnode) * stat->leaves;
1892 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
1893 bytes += sizeof(struct leaf_info) * stat->prefixes;
1895 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
1896 bytes += sizeof(struct tnode) * stat->tnodes;
1898 max = MAX_STAT_DEPTH;
1899 while (max > 0 && stat->nodesizes[max-1] == 0)
1903 for (i = 1; i < max; i++)
1904 if (stat->nodesizes[i] != 0) {
1905 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
1906 pointers += (1<<i) * stat->nodesizes[i];
1908 seq_putc(seq, '\n');
1909 seq_printf(seq, "\tPointers: %u\n", pointers);
1911 bytes += sizeof(struct tnode *) * pointers;
1912 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
1913 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
1916 #ifdef CONFIG_IP_FIB_TRIE_STATS
1917 static void trie_show_usage(struct seq_file *seq,
1918 const struct trie_use_stats __percpu *stats)
1920 struct trie_use_stats s = { 0 };
1923 /* loop through all of the CPUs and gather up the stats */
1924 for_each_possible_cpu(cpu) {
1925 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
1927 s.gets += pcpu->gets;
1928 s.backtrack += pcpu->backtrack;
1929 s.semantic_match_passed += pcpu->semantic_match_passed;
1930 s.semantic_match_miss += pcpu->semantic_match_miss;
1931 s.null_node_hit += pcpu->null_node_hit;
1932 s.resize_node_skipped += pcpu->resize_node_skipped;
1935 seq_printf(seq, "\nCounters:\n---------\n");
1936 seq_printf(seq, "gets = %u\n", s.gets);
1937 seq_printf(seq, "backtracks = %u\n", s.backtrack);
1938 seq_printf(seq, "semantic match passed = %u\n",
1939 s.semantic_match_passed);
1940 seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
1941 seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
1942 seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
1944 #endif /* CONFIG_IP_FIB_TRIE_STATS */
1946 static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
1948 if (tb->tb_id == RT_TABLE_LOCAL)
1949 seq_puts(seq, "Local:\n");
1950 else if (tb->tb_id == RT_TABLE_MAIN)
1951 seq_puts(seq, "Main:\n");
1953 seq_printf(seq, "Id %d:\n", tb->tb_id);
1957 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
1959 struct net *net = (struct net *)seq->private;
1963 "Basic info: size of leaf:"
1964 " %Zd bytes, size of tnode: %Zd bytes.\n",
1965 sizeof(struct tnode), sizeof(struct tnode));
1967 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1968 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
1969 struct fib_table *tb;
1971 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
1972 struct trie *t = (struct trie *) tb->tb_data;
1973 struct trie_stat stat;
1978 fib_table_print(seq, tb);
1980 trie_collect_stats(t, &stat);
1981 trie_show_stats(seq, &stat);
1982 #ifdef CONFIG_IP_FIB_TRIE_STATS
1983 trie_show_usage(seq, t->stats);
1991 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
1993 return single_open_net(inode, file, fib_triestat_seq_show);
1996 static const struct file_operations fib_triestat_fops = {
1997 .owner = THIS_MODULE,
1998 .open = fib_triestat_seq_open,
2000 .llseek = seq_lseek,
2001 .release = single_release_net,
2004 static struct tnode *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
2006 struct fib_trie_iter *iter = seq->private;
2007 struct net *net = seq_file_net(seq);
2011 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2012 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2013 struct fib_table *tb;
2015 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2018 for (n = fib_trie_get_first(iter,
2019 (struct trie *) tb->tb_data);
2020 n; n = fib_trie_get_next(iter))
2031 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2035 return fib_trie_get_idx(seq, *pos);
2038 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2040 struct fib_trie_iter *iter = seq->private;
2041 struct net *net = seq_file_net(seq);
2042 struct fib_table *tb = iter->tb;
2043 struct hlist_node *tb_node;
2048 /* next node in same table */
2049 n = fib_trie_get_next(iter);
2053 /* walk rest of this hash chain */
2054 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
2055 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
2056 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2057 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2062 /* new hash chain */
2063 while (++h < FIB_TABLE_HASHSZ) {
2064 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2065 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2066 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2078 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2084 static void seq_indent(struct seq_file *seq, int n)
2090 static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
2093 case RT_SCOPE_UNIVERSE: return "universe";
2094 case RT_SCOPE_SITE: return "site";
2095 case RT_SCOPE_LINK: return "link";
2096 case RT_SCOPE_HOST: return "host";
2097 case RT_SCOPE_NOWHERE: return "nowhere";
2099 snprintf(buf, len, "scope=%d", s);
2104 static const char *const rtn_type_names[__RTN_MAX] = {
2105 [RTN_UNSPEC] = "UNSPEC",
2106 [RTN_UNICAST] = "UNICAST",
2107 [RTN_LOCAL] = "LOCAL",
2108 [RTN_BROADCAST] = "BROADCAST",
2109 [RTN_ANYCAST] = "ANYCAST",
2110 [RTN_MULTICAST] = "MULTICAST",
2111 [RTN_BLACKHOLE] = "BLACKHOLE",
2112 [RTN_UNREACHABLE] = "UNREACHABLE",
2113 [RTN_PROHIBIT] = "PROHIBIT",
2114 [RTN_THROW] = "THROW",
2116 [RTN_XRESOLVE] = "XRESOLVE",
2119 static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
2121 if (t < __RTN_MAX && rtn_type_names[t])
2122 return rtn_type_names[t];
2123 snprintf(buf, len, "type %u", t);
2127 /* Pretty print the trie */
2128 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2130 const struct fib_trie_iter *iter = seq->private;
2131 struct tnode *n = v;
2133 if (!node_parent_rcu(n))
2134 fib_table_print(seq, iter->tb);
2137 __be32 prf = htonl(n->key);
2139 seq_indent(seq, iter->depth-1);
2140 seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
2141 &prf, KEYLENGTH - n->pos - n->bits, n->bits,
2142 n->full_children, n->empty_children);
2144 struct leaf_info *li;
2145 __be32 val = htonl(n->key);
2147 seq_indent(seq, iter->depth);
2148 seq_printf(seq, " |-- %pI4\n", &val);
2150 hlist_for_each_entry_rcu(li, &n->list, hlist) {
2151 struct fib_alias *fa;
2153 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2154 char buf1[32], buf2[32];
2156 seq_indent(seq, iter->depth+1);
2157 seq_printf(seq, " /%d %s %s", li->plen,
2158 rtn_scope(buf1, sizeof(buf1),
2159 fa->fa_info->fib_scope),
2160 rtn_type(buf2, sizeof(buf2),
2163 seq_printf(seq, " tos=%d", fa->fa_tos);
2164 seq_putc(seq, '\n');
2172 static const struct seq_operations fib_trie_seq_ops = {
2173 .start = fib_trie_seq_start,
2174 .next = fib_trie_seq_next,
2175 .stop = fib_trie_seq_stop,
2176 .show = fib_trie_seq_show,
2179 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2181 return seq_open_net(inode, file, &fib_trie_seq_ops,
2182 sizeof(struct fib_trie_iter));
2185 static const struct file_operations fib_trie_fops = {
2186 .owner = THIS_MODULE,
2187 .open = fib_trie_seq_open,
2189 .llseek = seq_lseek,
2190 .release = seq_release_net,
2193 struct fib_route_iter {
2194 struct seq_net_private p;
2195 struct trie *main_trie;
2200 static struct tnode *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos)
2202 struct tnode *l = NULL;
2203 struct trie *t = iter->main_trie;
2205 /* use the cached location of the last found key */
2206 if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key)))
2210 l = trie_firstleaf(t);
2213 while (l && pos-- > 0) {
2215 l = trie_nextleaf(l);
2219 iter->key = l->key; /* remember it */
2221 iter->pos = 0; /* forget it */
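	/* e.g. successive reads of /proc/net/route then resume from the
	 * leaf remembered in iter->key instead of rescanning pos leaves
	 * from the start, keeping large dumps roughly linear overall.
	 */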
2226 static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2229 struct fib_route_iter *iter = seq->private;
2230 struct fib_table *tb;
2233 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
2237 iter->main_trie = (struct trie *) tb->tb_data;
2239 return SEQ_START_TOKEN;
2241 return fib_route_get_idx(iter, *pos - 1);
2244 static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2246 struct fib_route_iter *iter = seq->private;
2247 struct tnode *l = v;
2250 if (v == SEQ_START_TOKEN) {
2252 l = trie_firstleaf(iter->main_trie);
2255 l = trie_nextleaf(l);
2265 static void fib_route_seq_stop(struct seq_file *seq, void *v)
2271 static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2273 unsigned int flags = 0;
2275 if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
2277 if (fi && fi->fib_nh->nh_gw)
2278 flags |= RTF_GATEWAY;
2279 if (mask == htonl(0xFFFFFFFF))
2286 * This outputs /proc/net/route.
2287 * The format of the file is not supposed to be changed
2288 * and needs to be the same as the fib_hash output to avoid breaking
2291 static int fib_route_seq_show(struct seq_file *seq, void *v)
2293 struct tnode *l = v;
2294 struct leaf_info *li;
2296 if (v == SEQ_START_TOKEN) {
2297 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2298 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2303 hlist_for_each_entry_rcu(li, &l->list, hlist) {
2304 struct fib_alias *fa;
2305 __be32 mask, prefix;
2307 mask = inet_make_mask(li->plen);
2308 prefix = htonl(l->key);
2310 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2311 const struct fib_info *fi = fa->fa_info;
2312 unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
2314 if (fa->fa_type == RTN_BROADCAST
2315 || fa->fa_type == RTN_MULTICAST)
2318 seq_setwidth(seq, 127);
2322 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
2323 "%d\t%08X\t%d\t%u\t%u",
2324 fi->fib_dev ? fi->fib_dev->name : "*",
2326 fi->fib_nh->nh_gw, flags, 0, 0,
2330 fi->fib_advmss + 40 : 0),
2335 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
2336 "%d\t%08X\t%d\t%u\t%u",
2337 prefix, 0, flags, 0, 0, 0,
2347 static const struct seq_operations fib_route_seq_ops = {
2348 .start = fib_route_seq_start,
2349 .next = fib_route_seq_next,
2350 .stop = fib_route_seq_stop,
2351 .show = fib_route_seq_show,
2354 static int fib_route_seq_open(struct inode *inode, struct file *file)
2356 return seq_open_net(inode, file, &fib_route_seq_ops,
2357 sizeof(struct fib_route_iter));
2360 static const struct file_operations fib_route_fops = {
2361 .owner = THIS_MODULE,
2362 .open = fib_route_seq_open,
2364 .llseek = seq_lseek,
2365 .release = seq_release_net,
2368 int __net_init fib_proc_init(struct net *net)
2370 if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
2373 if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
2374 &fib_triestat_fops))
2377 if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
2383 remove_proc_entry("fib_triestat", net->proc_net);
2385 remove_proc_entry("fib_trie", net->proc_net);
2390 void __net_exit fib_proc_exit(struct net *net)
2392 remove_proc_entry("fib_trie", net->proc_net);
2393 remove_proc_entry("fib_triestat", net->proc_net);
2394 remove_proc_entry("route", net->proc_net);
2397 #endif /* CONFIG_PROC_FS */