net/ipv4/fib_trie.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version
5 * 2 of the License, or (at your option) any later version.
6 *
7 * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet
8 * & Swedish University of Agricultural Sciences.
9 *
10 * Jens Laas <jens.laas@data.slu.se> Swedish University of
11 * Agricultural Sciences.
12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 *
15 * This work is based on the LPC-trie which is originally described in:
16 *
17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
19 * http://www.csc.kth.se/~snilsson/software/dyntrie2/
20 *
21 *
22 * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
23 * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
24 *
25 *
26 * Code from fib_hash has been reused which includes the following header:
27 *
28 *
29 * INET An implementation of the TCP/IP protocol suite for the LINUX
30 * operating system. INET is implemented using the BSD Socket
31 * interface as the means of communication with the user level.
32 *
33 * IPv4 FIB: lookup engine and maintenance routines.
34 *
35 *
36 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
37 *
38 * This program is free software; you can redistribute it and/or
39 * modify it under the terms of the GNU General Public License
40 * as published by the Free Software Foundation; either version
41 * 2 of the License, or (at your option) any later version.
42 *
43 * Substantial contributions to this work comes from:
44 *
45 * David S. Miller, <davem@davemloft.net>
46 * Stephen Hemminger <shemminger@osdl.org>
47 * Paul E. McKenney <paulmck@us.ibm.com>
48 * Patrick McHardy <kaber@trash.net>
49 */
50
51 #define VERSION "0.409"
52
53 #include <linux/uaccess.h>
54 #include <linux/bitops.h>
55 #include <linux/types.h>
56 #include <linux/kernel.h>
57 #include <linux/mm.h>
58 #include <linux/string.h>
59 #include <linux/socket.h>
60 #include <linux/sockios.h>
61 #include <linux/errno.h>
62 #include <linux/in.h>
63 #include <linux/inet.h>
64 #include <linux/inetdevice.h>
65 #include <linux/netdevice.h>
66 #include <linux/if_arp.h>
67 #include <linux/proc_fs.h>
68 #include <linux/rcupdate.h>
69 #include <linux/skbuff.h>
70 #include <linux/netlink.h>
71 #include <linux/init.h>
72 #include <linux/list.h>
73 #include <linux/slab.h>
74 #include <linux/export.h>
75 #include <linux/vmalloc.h>
76 #include <linux/notifier.h>
77 #include <net/net_namespace.h>
78 #include <net/ip.h>
79 #include <net/protocol.h>
80 #include <net/route.h>
81 #include <net/tcp.h>
82 #include <net/sock.h>
83 #include <net/ip_fib.h>
84 #include <trace/events/fib.h>
85 #include "fib_lookup.h"
86
87 static unsigned int fib_seq_sum(void)
88 {
89 unsigned int fib_seq = 0;
90 struct net *net;
91
92 rtnl_lock();
93 for_each_net(net)
94 fib_seq += net->ipv4.fib_seq;
95 rtnl_unlock();
96
97 return fib_seq;
98 }
99
100 static ATOMIC_NOTIFIER_HEAD(fib_chain);
101
102 static int call_fib_notifier(struct notifier_block *nb, struct net *net,
103 enum fib_event_type event_type,
104 struct fib_notifier_info *info)
105 {
106 info->net = net;
107 return nb->notifier_call(nb, event_type, info);
108 }
109
110 static void fib_rules_notify(struct net *net, struct notifier_block *nb,
111 enum fib_event_type event_type)
112 {
113 #ifdef CONFIG_IP_MULTIPLE_TABLES
114 struct fib_notifier_info info;
115
116 if (net->ipv4.fib_has_custom_rules)
117 call_fib_notifier(nb, net, event_type, &info);
118 #endif
119 }
120
121 static void fib_notify(struct net *net, struct notifier_block *nb,
122 enum fib_event_type event_type);
123
124 static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
125 enum fib_event_type event_type, u32 dst,
126 int dst_len, struct fib_info *fi,
127 u8 tos, u8 type, u32 tb_id)
128 {
129 struct fib_entry_notifier_info info = {
130 .dst = dst,
131 .dst_len = dst_len,
132 .fi = fi,
133 .tos = tos,
134 .type = type,
135 .tb_id = tb_id,
136 };
137 return call_fib_notifier(nb, net, event_type, &info.info);
138 }
139
140 static bool fib_dump_is_consistent(struct notifier_block *nb,
141 void (*cb)(struct notifier_block *nb),
142 unsigned int fib_seq)
143 {
144 atomic_notifier_chain_register(&fib_chain, nb);
145 if (fib_seq == fib_seq_sum())
146 return true;
147 atomic_notifier_chain_unregister(&fib_chain, nb);
148 if (cb)
149 cb(nb);
150 return false;
151 }
152
153 #define FIB_DUMP_MAX_RETRIES 5
154 int register_fib_notifier(struct notifier_block *nb,
155 void (*cb)(struct notifier_block *nb))
156 {
157 int retries = 0;
158
159 do {
160 unsigned int fib_seq = fib_seq_sum();
161 struct net *net;
162
163 /* Mutex semantics guarantee that every change done to
164 * FIB tries before we read the change sequence counter
165 * is now visible to us.
166 */
167 rcu_read_lock();
168 for_each_net_rcu(net) {
169 fib_rules_notify(net, nb, FIB_EVENT_RULE_ADD);
170 fib_notify(net, nb, FIB_EVENT_ENTRY_ADD);
171 }
172 rcu_read_unlock();
173
174 if (fib_dump_is_consistent(nb, cb, fib_seq))
175 return 0;
176 } while (++retries < FIB_DUMP_MAX_RETRIES);
177
178 return -EBUSY;
179 }
180 EXPORT_SYMBOL(register_fib_notifier);
181
182 int unregister_fib_notifier(struct notifier_block *nb)
183 {
184 return atomic_notifier_chain_unregister(&fib_chain, nb);
185 }
186 EXPORT_SYMBOL(unregister_fib_notifier);
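/* Editorial sketch, not part of the original file: a minimal subscriber to
 * the FIB notifier chain above.  The names my_fib_event/my_fib_nb/my_fib_init
 * are hypothetical; the chain passes the fib_event_type as 'event' and a
 * struct fib_notifier_info (or a structure embedding it) as 'ptr'.
 */
#if 0	/* illustration only */
static int my_fib_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	struct fib_notifier_info *info = ptr;

	if (event == FIB_EVENT_ENTRY_ADD || event == FIB_EVENT_ENTRY_DEL)
		pr_info("FIB entry event %lu in net %p\n", event, info->net);

	return NOTIFY_DONE;
}

static struct notifier_block my_fib_nb = {
	.notifier_call = my_fib_event,
};

static int __init my_fib_init(void)
{
	/* replays RULE_ADD/ENTRY_ADD for existing state and retries up to
	 * FIB_DUMP_MAX_RETRIES times if the FIB changed during the dump
	 */
	return register_fib_notifier(&my_fib_nb, NULL);
}
#endif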
187
188 int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
189 struct fib_notifier_info *info)
190 {
191 net->ipv4.fib_seq++;
192 info->net = net;
193 return atomic_notifier_call_chain(&fib_chain, event_type, info);
194 }
195
196 static int call_fib_entry_notifiers(struct net *net,
197 enum fib_event_type event_type, u32 dst,
198 int dst_len, struct fib_info *fi,
199 u8 tos, u8 type, u32 tb_id)
200 {
201 struct fib_entry_notifier_info info = {
202 .dst = dst,
203 .dst_len = dst_len,
204 .fi = fi,
205 .tos = tos,
206 .type = type,
207 .tb_id = tb_id,
208 };
209 return call_fib_notifiers(net, event_type, &info.info);
210 }
211
212 #define MAX_STAT_DEPTH 32
213
214 #define KEYLENGTH (8*sizeof(t_key))
215 #define KEY_MAX ((t_key)~0)
216
217 typedef unsigned int t_key;
218
219 #define IS_TRIE(n) ((n)->pos >= KEYLENGTH)
220 #define IS_TNODE(n) ((n)->bits)
221 #define IS_LEAF(n) (!(n)->bits)
222
223 struct key_vector {
224 t_key key;
225 unsigned char pos; /* 2log(KEYLENGTH) bits needed */
226 unsigned char bits; /* 2log(KEYLENGTH) bits needed */
227 unsigned char slen;
228 union {
229 /* This list pointer is valid if (pos | bits) == 0 (LEAF) */
230 struct hlist_head leaf;
231 /* This array is valid if (pos | bits) > 0 (TNODE) */
232 struct key_vector __rcu *tnode[0];
233 };
234 };
235
236 struct tnode {
237 struct rcu_head rcu;
238 t_key empty_children; /* KEYLENGTH bits needed */
239 t_key full_children; /* KEYLENGTH bits needed */
240 struct key_vector __rcu *parent;
241 struct key_vector kv[1];
242 #define tn_bits kv[0].bits
243 };
244
245 #define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n])
246 #define LEAF_SIZE TNODE_SIZE(1)
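/* Editorial note: TNODE_SIZE(n) is the size of a tnode whose embedded kv[0]
 * carries an n-entry child array; LEAF_SIZE can reuse TNODE_SIZE(1) because
 * the hlist_head in the union above occupies the same single pointer slot
 * that tnode[0] would.
 */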
247
248 #ifdef CONFIG_IP_FIB_TRIE_STATS
249 struct trie_use_stats {
250 unsigned int gets;
251 unsigned int backtrack;
252 unsigned int semantic_match_passed;
253 unsigned int semantic_match_miss;
254 unsigned int null_node_hit;
255 unsigned int resize_node_skipped;
256 };
257 #endif
258
259 struct trie_stat {
260 unsigned int totdepth;
261 unsigned int maxdepth;
262 unsigned int tnodes;
263 unsigned int leaves;
264 unsigned int nullpointers;
265 unsigned int prefixes;
266 unsigned int nodesizes[MAX_STAT_DEPTH];
267 };
268
269 struct trie {
270 struct key_vector kv[1];
271 #ifdef CONFIG_IP_FIB_TRIE_STATS
272 struct trie_use_stats __percpu *stats;
273 #endif
274 };
275
276 static struct key_vector *resize(struct trie *t, struct key_vector *tn);
277 static size_t tnode_free_size;
278
279 /*
280 * synchronize_rcu after call_rcu for that many pages; it should be especially
281 * useful before resizing the root node with PREEMPT_NONE configs; the value was
282 * obtained experimentally, aiming to avoid visible slowdown.
283 */
284 static const int sync_pages = 128;
285
286 static struct kmem_cache *fn_alias_kmem __read_mostly;
287 static struct kmem_cache *trie_leaf_kmem __read_mostly;
288
289 static inline struct tnode *tn_info(struct key_vector *kv)
290 {
291 return container_of(kv, struct tnode, kv[0]);
292 }
293
294 /* caller must hold RTNL */
295 #define node_parent(tn) rtnl_dereference(tn_info(tn)->parent)
296 #define get_child(tn, i) rtnl_dereference((tn)->tnode[i])
297
298 /* caller must hold RCU read lock or RTNL */
299 #define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent)
300 #define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i])
301
302 /* wrapper for rcu_assign_pointer */
303 static inline void node_set_parent(struct key_vector *n, struct key_vector *tp)
304 {
305 if (n)
306 rcu_assign_pointer(tn_info(n)->parent, tp);
307 }
308
309 #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p)
310
311 /* This provides us with the number of children in this node; in the case of a
312 * leaf it returns 0, meaning none of the children are accessible.
313 */
314 static inline unsigned long child_length(const struct key_vector *tn)
315 {
316 return (1ul << tn->bits) & ~(1ul);
317 }
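/* Editorial note: for a leaf (bits == 0) this evaluates to (1 << 0) & ~1ul,
 * i.e. 0; for a tnode with bits == 4 it is simply 16, since the & ~1ul only
 * has an effect in the leaf case.
 */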
318
319 #define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)
320
321 static inline unsigned long get_index(t_key key, struct key_vector *kv)
322 {
323 unsigned long index = key ^ kv->key;
324
325 if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
326 return 0;
327
328 return index >> kv->pos;
329 }
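/* Editorial note: the explicit check above covers the trie's root key_vector,
 * which has pos == KEYLENGTH; on a 32-bit host (BITS_PER_LONG == 32) shifting
 * by KEYLENGTH would be undefined, so 0 is returned directly, while on 64-bit
 * hosts the shift itself already yields 0 for the root.
 */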
330
331 /* To understand this stuff, an understanding of keys and all their bits is
332 * necessary. Every node in the trie has a key associated with it, but not
333 * all of the bits in that key are significant.
334 *
335 * Consider a node 'n' and its parent 'tp'.
336 *
337 * If n is a leaf, every bit in its key is significant. Its presence is
338 * necessitated by path compression, since during a tree traversal (when
339 * searching for a leaf - unless we are doing an insertion) we will completely
340 * ignore all skipped bits we encounter. Thus we need to verify, at the end of
341 * a potentially successful search, that we have indeed been walking the
342 * correct key path.
343 *
344 * Note that we can never "miss" the correct key in the tree if present by
345 * following the wrong path. Path compression ensures that segments of the key
346 * that are the same for all keys with a given prefix are skipped, but the
347 * skipped part *is* identical for each node in the subtrie below the skipped
348 * bit! trie_insert() in this implementation takes care of that.
349 *
350 * If n is an internal node - a 'tnode' here - the various parts of its key
351 * have many different meanings.
352 *
353 * Example:
354 * _________________________________________________________________
355 * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C |
356 * -----------------------------------------------------------------
357 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
358 *
359 * _________________________________________________________________
360 * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u |
361 * -----------------------------------------------------------------
362 * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
363 *
364 * tp->pos = 22
365 * tp->bits = 3
366 * n->pos = 13
367 * n->bits = 4
368 *
369 * First, let's just ignore the bits that come before the parent tp, that is
370 * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this
371 * point we do not use them for anything.
372 *
373 * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the
374 * index into the parent's child array. That is, they will be used to find
375 * 'n' among tp's children.
376 *
377 * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits
378 * for the node n.
379 *
380 * All the bits we have seen so far are significant to the node n. The rest
381 * of the bits are really not needed or indeed known in n->key.
382 *
383 * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into
384 * n's child array, and will of course be different for each child.
385 *
386 * The rest of the bits, from 0 to (n->pos -1) - "u" - are completely unknown
387 * at this point.
388 */
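/* Editorial worked example for the diagram above: with n->pos = 13 and
 * n->bits = 4, get_cindex(key, n) == ((key ^ n->key) >> 13).  The result is a
 * valid child index only while it is below (1ul << 4); anything larger means
 * key and n->key already differ at or above bit (n->pos + n->bits), i.e. in
 * the skipped "S" bits for a walk that has matched the parent's index bits,
 * which is exactly the "index >= (1ul << bits)" test used by the lookup code
 * below.
 */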
389
390 static const int halve_threshold = 25;
391 static const int inflate_threshold = 50;
392 static const int halve_threshold_root = 15;
393 static const int inflate_threshold_root = 30;
394
395 static void __alias_free_mem(struct rcu_head *head)
396 {
397 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
398 kmem_cache_free(fn_alias_kmem, fa);
399 }
400
401 static inline void alias_free_mem_rcu(struct fib_alias *fa)
402 {
403 call_rcu(&fa->rcu, __alias_free_mem);
404 }
405
406 #define TNODE_KMALLOC_MAX \
407 ilog2((PAGE_SIZE - TNODE_SIZE(0)) / sizeof(struct key_vector *))
408 #define TNODE_VMALLOC_MAX \
409 ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *))
410
411 static void __node_free_rcu(struct rcu_head *head)
412 {
413 struct tnode *n = container_of(head, struct tnode, rcu);
414
415 if (!n->tn_bits)
416 kmem_cache_free(trie_leaf_kmem, n);
417 else
418 kvfree(n);
419 }
420
421 #define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu)
422
423 static struct tnode *tnode_alloc(int bits)
424 {
425 size_t size;
426
427 /* verify bits is within bounds */
428 if (bits > TNODE_VMALLOC_MAX)
429 return NULL;
430
431 /* determine size and verify it is non-zero and didn't overflow */
432 size = TNODE_SIZE(1ul << bits);
433
434 if (size <= PAGE_SIZE)
435 return kzalloc(size, GFP_KERNEL);
436 else
437 return vzalloc(size);
438 }
439
440 static inline void empty_child_inc(struct key_vector *n)
441 {
442 ++tn_info(n)->empty_children ? : ++tn_info(n)->full_children;
443 }
444
445 static inline void empty_child_dec(struct key_vector *n)
446 {
447 tn_info(n)->empty_children-- ? : tn_info(n)->full_children--;
448 }
449
450 static struct key_vector *leaf_new(t_key key, struct fib_alias *fa)
451 {
452 struct key_vector *l;
453 struct tnode *kv;
454
455 kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
456 if (!kv)
457 return NULL;
458
459 /* initialize key vector */
460 l = kv->kv;
461 l->key = key;
462 l->pos = 0;
463 l->bits = 0;
464 l->slen = fa->fa_slen;
465
466 /* link leaf to fib alias */
467 INIT_HLIST_HEAD(&l->leaf);
468 hlist_add_head(&fa->fa_list, &l->leaf);
469
470 return l;
471 }
472
473 static struct key_vector *tnode_new(t_key key, int pos, int bits)
474 {
475 unsigned int shift = pos + bits;
476 struct key_vector *tn;
477 struct tnode *tnode;
478
479 /* verify bits is non-zero and that pos + bits stays within KEYLENGTH */
480 BUG_ON(!bits || (shift > KEYLENGTH));
481
482 tnode = tnode_alloc(bits);
483 if (!tnode)
484 return NULL;
485
486 pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0),
487 sizeof(struct key_vector *) << bits);
488
489 if (bits == KEYLENGTH)
490 tnode->full_children = 1;
491 else
492 tnode->empty_children = 1ul << bits;
493
494 tn = tnode->kv;
495 tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0;
496 tn->pos = pos;
497 tn->bits = bits;
498 tn->slen = pos;
499
500 return tn;
501 }
502
503 /* Check whether a tnode 'n' is "full", i.e. it is an internal node
504 * and no bits are skipped. See discussion in dyntree paper p. 6
505 */
506 static inline int tnode_full(struct key_vector *tn, struct key_vector *n)
507 {
508 return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n);
509 }
510
511 /* Add a child at position i overwriting the old value.
512 * Update the value of full_children and empty_children.
513 */
514 static void put_child(struct key_vector *tn, unsigned long i,
515 struct key_vector *n)
516 {
517 struct key_vector *chi = get_child(tn, i);
518 int isfull, wasfull;
519
520 BUG_ON(i >= child_length(tn));
521
522 /* update emptyChildren, overflow into fullChildren */
523 if (!n && chi)
524 empty_child_inc(tn);
525 if (n && !chi)
526 empty_child_dec(tn);
527
528 /* update fullChildren */
529 wasfull = tnode_full(tn, chi);
530 isfull = tnode_full(tn, n);
531
532 if (wasfull && !isfull)
533 tn_info(tn)->full_children--;
534 else if (!wasfull && isfull)
535 tn_info(tn)->full_children++;
536
537 if (n && (tn->slen < n->slen))
538 tn->slen = n->slen;
539
540 rcu_assign_pointer(tn->tnode[i], n);
541 }
542
543 static void update_children(struct key_vector *tn)
544 {
545 unsigned long i;
546
547 /* update all of the child parent pointers */
548 for (i = child_length(tn); i;) {
549 struct key_vector *inode = get_child(tn, --i);
550
551 if (!inode)
552 continue;
553
554 /* Either update the children of a tnode that
555 * already belongs to us or update the child
556 * to point to ourselves.
557 */
558 if (node_parent(inode) == tn)
559 update_children(inode);
560 else
561 node_set_parent(inode, tn);
562 }
563 }
564
565 static inline void put_child_root(struct key_vector *tp, t_key key,
566 struct key_vector *n)
567 {
568 if (IS_TRIE(tp))
569 rcu_assign_pointer(tp->tnode[0], n);
570 else
571 put_child(tp, get_index(key, tp), n);
572 }
573
574 static inline void tnode_free_init(struct key_vector *tn)
575 {
576 tn_info(tn)->rcu.next = NULL;
577 }
578
579 static inline void tnode_free_append(struct key_vector *tn,
580 struct key_vector *n)
581 {
582 tn_info(n)->rcu.next = tn_info(tn)->rcu.next;
583 tn_info(tn)->rcu.next = &tn_info(n)->rcu;
584 }
585
586 static void tnode_free(struct key_vector *tn)
587 {
588 struct callback_head *head = &tn_info(tn)->rcu;
589
590 while (head) {
591 head = head->next;
592 tnode_free_size += TNODE_SIZE(1ul << tn->bits);
593 node_free(tn);
594
595 tn = container_of(head, struct tnode, rcu)->kv;
596 }
597
598 if (tnode_free_size >= PAGE_SIZE * sync_pages) {
599 tnode_free_size = 0;
600 synchronize_rcu();
601 }
602 }
603
604 static struct key_vector *replace(struct trie *t,
605 struct key_vector *oldtnode,
606 struct key_vector *tn)
607 {
608 struct key_vector *tp = node_parent(oldtnode);
609 unsigned long i;
610
611 /* setup the parent pointer out of and back into this node */
612 NODE_INIT_PARENT(tn, tp);
613 put_child_root(tp, tn->key, tn);
614
615 /* update all of the child parent pointers */
616 update_children(tn);
617
618 /* all pointers should be clean so we are done */
619 tnode_free(oldtnode);
620
621 /* resize children now that oldtnode is freed */
622 for (i = child_length(tn); i;) {
623 struct key_vector *inode = get_child(tn, --i);
624
625 /* resize child node */
626 if (tnode_full(tn, inode))
627 tn = resize(t, inode);
628 }
629
630 return tp;
631 }
632
633 static struct key_vector *inflate(struct trie *t,
634 struct key_vector *oldtnode)
635 {
636 struct key_vector *tn;
637 unsigned long i;
638 t_key m;
639
640 pr_debug("In inflate\n");
641
642 tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1);
643 if (!tn)
644 goto notnode;
645
646 /* prepare oldtnode to be freed */
647 tnode_free_init(oldtnode);
648
649 /* Assemble all of the pointers in our cluster, in this case that
650 * represents all of the pointers out of our allocated nodes that
651 * point to existing tnodes and the links between our allocated
652 * nodes.
653 */
654 for (i = child_length(oldtnode), m = 1u << tn->pos; i;) {
655 struct key_vector *inode = get_child(oldtnode, --i);
656 struct key_vector *node0, *node1;
657 unsigned long j, k;
658
659 /* An empty child */
660 if (!inode)
661 continue;
662
663 /* A leaf or an internal node with skipped bits */
664 if (!tnode_full(oldtnode, inode)) {
665 put_child(tn, get_index(inode->key, tn), inode);
666 continue;
667 }
668
669 /* drop the node in the old tnode free list */
670 tnode_free_append(oldtnode, inode);
671
672 /* An internal node with two children */
673 if (inode->bits == 1) {
674 put_child(tn, 2 * i + 1, get_child(inode, 1));
675 put_child(tn, 2 * i, get_child(inode, 0));
676 continue;
677 }
678
679 /* We will replace this node 'inode' with two new
680 * ones, 'node0' and 'node1', each with half of the
681 * original children. The two new nodes will have
682 * a position one bit further down the key and this
683 * means that the "significant" part of their keys
684 * (see the discussion near the top of this file)
685 * will differ by one bit, which will be "0" in
686 * node0's key and "1" in node1's key. Since we are
687 * moving the key position by one step, the bit that
688 * we are moving away from - the bit at position
689 * (tn->pos) - is the one that will differ between
690 * node0 and node1. So... we synthesize that bit in the
691 * two new keys.
692 */
693 node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1);
694 if (!node1)
695 goto nomem;
696 node0 = tnode_new(inode->key, inode->pos, inode->bits - 1);
697
698 tnode_free_append(tn, node1);
699 if (!node0)
700 goto nomem;
701 tnode_free_append(tn, node0);
702
703 /* populate child pointers in new nodes */
704 for (k = child_length(inode), j = k / 2; j;) {
705 put_child(node1, --j, get_child(inode, --k));
706 put_child(node0, j, get_child(inode, j));
707 put_child(node1, --j, get_child(inode, --k));
708 put_child(node0, j, get_child(inode, j));
709 }
710
711 /* link new nodes to parent */
712 NODE_INIT_PARENT(node1, tn);
713 NODE_INIT_PARENT(node0, tn);
714
715 /* link parent to nodes */
716 put_child(tn, 2 * i + 1, node1);
717 put_child(tn, 2 * i, node0);
718 }
719
720 /* setup the parent pointers into and out of this node */
721 return replace(t, oldtnode, tn);
722 nomem:
723 /* all pointers should be clean so we are done */
724 tnode_free(tn);
725 notnode:
726 return NULL;
727 }
728
729 static struct key_vector *halve(struct trie *t,
730 struct key_vector *oldtnode)
731 {
732 struct key_vector *tn;
733 unsigned long i;
734
735 pr_debug("In halve\n");
736
737 tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1);
738 if (!tn)
739 goto notnode;
740
741 /* prepare oldtnode to be freed */
742 tnode_free_init(oldtnode);
743
744 /* Assemble all of the pointers in our cluster, in this case that
745 * represents all of the pointers out of our allocated nodes that
746 * point to existing tnodes and the links between our allocated
747 * nodes.
748 */
749 for (i = child_length(oldtnode); i;) {
750 struct key_vector *node1 = get_child(oldtnode, --i);
751 struct key_vector *node0 = get_child(oldtnode, --i);
752 struct key_vector *inode;
753
754 /* At least one of the children is empty */
755 if (!node1 || !node0) {
756 put_child(tn, i / 2, node1 ? : node0);
757 continue;
758 }
759
760 /* Two nonempty children */
761 inode = tnode_new(node0->key, oldtnode->pos, 1);
762 if (!inode)
763 goto nomem;
764 tnode_free_append(tn, inode);
765
766 /* initialize pointers out of node */
767 put_child(inode, 1, node1);
768 put_child(inode, 0, node0);
769 NODE_INIT_PARENT(inode, tn);
770
771 /* link parent to node */
772 put_child(tn, i / 2, inode);
773 }
774
775 /* setup the parent pointers into and out of this node */
776 return replace(t, oldtnode, tn);
777 nomem:
778 /* all pointers should be clean so we are done */
779 tnode_free(tn);
780 notnode:
781 return NULL;
782 }
783
784 static struct key_vector *collapse(struct trie *t,
785 struct key_vector *oldtnode)
786 {
787 struct key_vector *n, *tp;
788 unsigned long i;
789
790 /* scan the tnode looking for that one child that might still exist */
791 for (n = NULL, i = child_length(oldtnode); !n && i;)
792 n = get_child(oldtnode, --i);
793
794 /* compress one level */
795 tp = node_parent(oldtnode);
796 put_child_root(tp, oldtnode->key, n);
797 node_set_parent(n, tp);
798
799 /* drop dead node */
800 node_free(oldtnode);
801
802 return tp;
803 }
804
805 static unsigned char update_suffix(struct key_vector *tn)
806 {
807 unsigned char slen = tn->pos;
808 unsigned long stride, i;
809 unsigned char slen_max;
810
811 /* only vector 0 can have a suffix length greater than or equal to
812 * tn->pos + tn->bits; the second highest node will have a suffix
813 * length of at most tn->pos + tn->bits - 1
814 */
815 slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen);
816
817 /* search through the list of children looking for nodes that might
818 * have a suffix greater than the one we currently have. This is
819 * why we start with a stride of 2 since a stride of 1 would
820 * represent the nodes with suffix length equal to tn->pos
821 */
822 for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) {
823 struct key_vector *n = get_child(tn, i);
824
825 if (!n || (n->slen <= slen))
826 continue;
827
828 /* update stride and slen based on new value */
829 stride <<= (n->slen - slen);
830 slen = n->slen;
831 i &= ~(stride - 1);
832
833 /* stop searching if we have hit the maximum possible value */
834 if (slen >= slen_max)
835 break;
836 }
837
838 tn->slen = slen;
839
840 return slen;
841 }
842
843 /* From "Implementing a dynamic compressed trie" by Stefan Nilsson of
844 * the Helsinki University of Technology and Matti Tikkanen of Nokia
845 * Telecommunications, page 6:
846 * "A node is doubled if the ratio of non-empty children to all
847 * children in the *doubled* node is at least 'high'."
848 *
849 * 'high' in this instance is the variable 'inflate_threshold'. It
850 * is expressed as a percentage, so we multiply it with
851 * child_length() and instead of multiplying by 2 (since the
852 * child array will be doubled by inflate()) and multiplying
853 * the left-hand side by 100 (to handle the percentage thing) we
854 * multiply the left-hand side by 50.
855 *
856 * The left-hand side may look a bit weird: child_length(tn)
857 * - tn->empty_children is of course the number of non-null children
858 * in the current node. tn->full_children is the number of "full"
859 * children, that is non-null tnodes with a skip value of 0.
860 * All of those will be doubled in the resulting inflated tnode, so
861 * we just count them one extra time here.
862 *
863 * A clearer way to write this would be:
864 *
865 * to_be_doubled = tn->full_children;
866 * not_to_be_doubled = child_length(tn) - tn->empty_children -
867 * tn->full_children;
868 *
869 * new_child_length = child_length(tn) * 2;
870 *
871 * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) /
872 * new_child_length;
873 * if (new_fill_factor >= inflate_threshold)
874 *
875 * ...and so on, tho it would mess up the while () loop.
876 *
877 * anyway,
878 * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >=
879 * inflate_threshold
880 *
881 * avoid a division:
882 * 100 * (not_to_be_doubled + 2*to_be_doubled) >=
883 * inflate_threshold * new_child_length
884 *
885 * expand not_to_be_doubled and to_be_doubled, and shorten:
886 * 100 * (child_length(tn) - tn->empty_children +
887 * tn->full_children) >= inflate_threshold * new_child_length
888 *
889 * expand new_child_length:
890 * 100 * (child_length(tn) - tn->empty_children +
891 * tn->full_children) >=
892 * inflate_threshold * child_length(tn) * 2
893 *
894 * shorten again:
895 * 50 * (tn->full_children + child_length(tn) -
896 * tn->empty_children) >= inflate_threshold *
897 * child_length(tn)
898 *
899 */
900 static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn)
901 {
902 unsigned long used = child_length(tn);
903 unsigned long threshold = used;
904
905 /* Keep root node larger */
906 threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold;
907 used -= tn_info(tn)->empty_children;
908 used += tn_info(tn)->full_children;
909
910 /* if bits == KEYLENGTH then pos = 0, and will fail below */
911
912 return (used > 1) && tn->pos && ((50 * used) >= threshold);
913 }
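/* Editorial worked example for should_inflate(): take a non-root tnode with
 * bits == 2 (child_length == 4), one empty child and one full child.  Then
 * threshold = 4 * inflate_threshold = 200, used = 4 - 1 + 1 = 4, and
 * 50 * 4 = 200 >= 200, so the node is doubled.  With two empty children and
 * one full child, used = 4 - 2 + 1 = 3 and 50 * 3 = 150 < 200, so it is not.
 */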
914
915 static inline bool should_halve(struct key_vector *tp, struct key_vector *tn)
916 {
917 unsigned long used = child_length(tn);
918 unsigned long threshold = used;
919
920 /* Keep root node larger */
921 threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold;
922 used -= tn_info(tn)->empty_children;
923
924 /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */
925
926 return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold);
927 }
928
929 static inline bool should_collapse(struct key_vector *tn)
930 {
931 unsigned long used = child_length(tn);
932
933 used -= tn_info(tn)->empty_children;
934
935 /* account for bits == KEYLENGTH case */
936 if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children)
937 used -= KEY_MAX;
938
939 /* One child or none, time to drop us from the trie */
940 return used < 2;
941 }
942
943 #define MAX_WORK 10
944 static struct key_vector *resize(struct trie *t, struct key_vector *tn)
945 {
946 #ifdef CONFIG_IP_FIB_TRIE_STATS
947 struct trie_use_stats __percpu *stats = t->stats;
948 #endif
949 struct key_vector *tp = node_parent(tn);
950 unsigned long cindex = get_index(tn->key, tp);
951 int max_work = MAX_WORK;
952
953 pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n",
954 tn, inflate_threshold, halve_threshold);
955
956 /* track the tnode via the pointer from the parent instead of
957 * doing it ourselves. This way we can let RCU fully do its
958 * thing without us interfering
959 */
960 BUG_ON(tn != get_child(tp, cindex));
961
962 /* Double as long as the resulting node has a number of
963 * nonempty nodes that are above the threshold.
964 */
965 while (should_inflate(tp, tn) && max_work) {
966 tp = inflate(t, tn);
967 if (!tp) {
968 #ifdef CONFIG_IP_FIB_TRIE_STATS
969 this_cpu_inc(stats->resize_node_skipped);
970 #endif
971 break;
972 }
973
974 max_work--;
975 tn = get_child(tp, cindex);
976 }
977
978 /* update parent in case inflate failed */
979 tp = node_parent(tn);
980
981 /* Return if at least one inflate was run */
982 if (max_work != MAX_WORK)
983 return tp;
984
985 /* Halve as long as the number of empty children in this
986 * node is above threshold.
987 */
988 while (should_halve(tp, tn) && max_work) {
989 tp = halve(t, tn);
990 if (!tp) {
991 #ifdef CONFIG_IP_FIB_TRIE_STATS
992 this_cpu_inc(stats->resize_node_skipped);
993 #endif
994 break;
995 }
996
997 max_work--;
998 tn = get_child(tp, cindex);
999 }
1000
1001 /* Only one child remains */
1002 if (should_collapse(tn))
1003 return collapse(t, tn);
1004
1005 /* update parent in case halve failed */
1006 return node_parent(tn);
1007 }
1008
1009 static void node_pull_suffix(struct key_vector *tn, unsigned char slen)
1010 {
1011 unsigned char node_slen = tn->slen;
1012
1013 while ((node_slen > tn->pos) && (node_slen > slen)) {
1014 slen = update_suffix(tn);
1015 if (node_slen == slen)
1016 break;
1017
1018 tn = node_parent(tn);
1019 node_slen = tn->slen;
1020 }
1021 }
1022
1023 static void node_push_suffix(struct key_vector *tn, unsigned char slen)
1024 {
1025 while (tn->slen < slen) {
1026 tn->slen = slen;
1027 tn = node_parent(tn);
1028 }
1029 }
1030
1031 /* rcu_read_lock needs to be held by the caller on the read side */
1032 static struct key_vector *fib_find_node(struct trie *t,
1033 struct key_vector **tp, u32 key)
1034 {
1035 struct key_vector *pn, *n = t->kv;
1036 unsigned long index = 0;
1037
1038 do {
1039 pn = n;
1040 n = get_child_rcu(n, index);
1041
1042 if (!n)
1043 break;
1044
1045 index = get_cindex(key, n);
1046
1047 /* This bit of code is a bit tricky but it combines multiple
1048 * checks into a single check. The prefix consists of the
1049 * prefix plus zeros for the bits in the cindex. The index
1050 * is the difference between the key and this value. From
1051 * this we can actually derive several pieces of data.
1052 * if (index >= (1ul << bits))
1053 * we have a mismatch in skip bits and failed
1054 * else
1055 * we know the value is cindex
1056 *
1057 * This check is safe even if bits == KEYLENGTH due to the
1058 * fact that we can only allocate a node with 32 bits if a
1059 * long is greater than 32 bits.
1060 */
1061 if (index >= (1ul << n->bits)) {
1062 n = NULL;
1063 break;
1064 }
1065
1066 /* keep searching until we find a perfect match leaf or NULL */
1067 } while (IS_TNODE(n));
1068
1069 *tp = pn;
1070
1071 return n;
1072 }
1073
1074 /* Return the first fib alias matching TOS with
1075 * priority less than or equal to PRIO.
1076 */
1077 static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen,
1078 u8 tos, u32 prio, u32 tb_id)
1079 {
1080 struct fib_alias *fa;
1081
1082 if (!fah)
1083 return NULL;
1084
1085 hlist_for_each_entry(fa, fah, fa_list) {
1086 if (fa->fa_slen < slen)
1087 continue;
1088 if (fa->fa_slen != slen)
1089 break;
1090 if (fa->tb_id > tb_id)
1091 continue;
1092 if (fa->tb_id != tb_id)
1093 break;
1094 if (fa->fa_tos > tos)
1095 continue;
1096 if (fa->fa_info->fib_priority >= prio || fa->fa_tos < tos)
1097 return fa;
1098 }
1099
1100 return NULL;
1101 }
1102
1103 static void trie_rebalance(struct trie *t, struct key_vector *tn)
1104 {
1105 while (!IS_TRIE(tn))
1106 tn = resize(t, tn);
1107 }
1108
1109 static int fib_insert_node(struct trie *t, struct key_vector *tp,
1110 struct fib_alias *new, t_key key)
1111 {
1112 struct key_vector *n, *l;
1113
1114 l = leaf_new(key, new);
1115 if (!l)
1116 goto noleaf;
1117
1118 /* retrieve child from parent node */
1119 n = get_child(tp, get_index(key, tp));
1120
1121 /* Case 2: n is a LEAF or a TNODE and the key doesn't match.
1122 *
1123 * Add a new tnode here
1124 * first tnode needs some special handling
1125 * leaves us in position for handling as case 3
1126 */
1127 if (n) {
1128 struct key_vector *tn;
1129
1130 tn = tnode_new(key, __fls(key ^ n->key), 1);
1131 if (!tn)
1132 goto notnode;
1133
1134 /* initialize routes out of node */
1135 NODE_INIT_PARENT(tn, tp);
1136 put_child(tn, get_index(key, tn) ^ 1, n);
1137
1138 /* start adding routes into the node */
1139 put_child_root(tp, key, tn);
1140 node_set_parent(n, tn);
1141
1142 /* parent now has a NULL spot where the leaf can go */
1143 tp = tn;
1144 }
1145
1146 /* Case 3: n is NULL, and will just insert a new leaf */
1147 node_push_suffix(tp, new->fa_slen);
1148 NODE_INIT_PARENT(l, tp);
1149 put_child_root(tp, key, l);
1150 trie_rebalance(t, tp);
1151
1152 return 0;
1153 notnode:
1154 node_free(l);
1155 noleaf:
1156 return -ENOMEM;
1157 }
1158
1159 static int fib_insert_alias(struct trie *t, struct key_vector *tp,
1160 struct key_vector *l, struct fib_alias *new,
1161 struct fib_alias *fa, t_key key)
1162 {
1163 if (!l)
1164 return fib_insert_node(t, tp, new, key);
1165
1166 if (fa) {
1167 hlist_add_before_rcu(&new->fa_list, &fa->fa_list);
1168 } else {
1169 struct fib_alias *last;
1170
1171 hlist_for_each_entry(last, &l->leaf, fa_list) {
1172 if (new->fa_slen < last->fa_slen)
1173 break;
1174 if ((new->fa_slen == last->fa_slen) &&
1175 (new->tb_id > last->tb_id))
1176 break;
1177 fa = last;
1178 }
1179
1180 if (fa)
1181 hlist_add_behind_rcu(&new->fa_list, &fa->fa_list);
1182 else
1183 hlist_add_head_rcu(&new->fa_list, &l->leaf);
1184 }
1185
1186 /* if we added to the tail node then we need to update slen */
1187 if (l->slen < new->fa_slen) {
1188 l->slen = new->fa_slen;
1189 node_push_suffix(tp, new->fa_slen);
1190 }
1191
1192 return 0;
1193 }
1194
1195 /* Caller must hold RTNL. */
1196 int fib_table_insert(struct net *net, struct fib_table *tb,
1197 struct fib_config *cfg)
1198 {
1199 enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
1200 struct trie *t = (struct trie *)tb->tb_data;
1201 struct fib_alias *fa, *new_fa;
1202 struct key_vector *l, *tp;
1203 u16 nlflags = NLM_F_EXCL;
1204 struct fib_info *fi;
1205 u8 plen = cfg->fc_dst_len;
1206 u8 slen = KEYLENGTH - plen;
1207 u8 tos = cfg->fc_tos;
1208 u32 key;
1209 int err;
1210
1211 if (plen > KEYLENGTH)
1212 return -EINVAL;
1213
1214 key = ntohl(cfg->fc_dst);
1215
1216 pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen);
1217
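	/* Reject prefixes with host bits set: for plen < 32, key << plen must
	 * be zero.  Editorial example: 10.1.2.3/24 gives key 0x0a010203 and
	 * 0x0a010203 << 24 == 0x03000000 != 0, so only 10.1.2.0/24 is accepted.
	 */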
1218 if ((plen < KEYLENGTH) && (key << plen))
1219 return -EINVAL;
1220
1221 fi = fib_create_info(cfg);
1222 if (IS_ERR(fi)) {
1223 err = PTR_ERR(fi);
1224 goto err;
1225 }
1226
1227 l = fib_find_node(t, &tp, key);
1228 fa = l ? fib_find_alias(&l->leaf, slen, tos, fi->fib_priority,
1229 tb->tb_id) : NULL;
1230
1231 /* Now fa, if non-NULL, points to the first fib alias
1232 * with the same keys [prefix,tos,priority], if such a key already
1233 * exists, or to the node before which we will insert the new one.
1234 *
1235 * If fa is NULL, we will need to allocate a new one and
1236 * insert to the tail of the section matching the suffix length
1237 * of the new alias.
1238 */
1239
1240 if (fa && fa->fa_tos == tos &&
1241 fa->fa_info->fib_priority == fi->fib_priority) {
1242 struct fib_alias *fa_first, *fa_match;
1243
1244 err = -EEXIST;
1245 if (cfg->fc_nlflags & NLM_F_EXCL)
1246 goto out;
1247
1248 nlflags &= ~NLM_F_EXCL;
1249
1250 /* We have 2 goals:
1251 * 1. Find exact match for type, scope, fib_info to avoid
1252 * duplicate routes
1253 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
1254 */
1255 fa_match = NULL;
1256 fa_first = fa;
1257 hlist_for_each_entry_from(fa, fa_list) {
1258 if ((fa->fa_slen != slen) ||
1259 (fa->tb_id != tb->tb_id) ||
1260 (fa->fa_tos != tos))
1261 break;
1262 if (fa->fa_info->fib_priority != fi->fib_priority)
1263 break;
1264 if (fa->fa_type == cfg->fc_type &&
1265 fa->fa_info == fi) {
1266 fa_match = fa;
1267 break;
1268 }
1269 }
1270
1271 if (cfg->fc_nlflags & NLM_F_REPLACE) {
1272 struct fib_info *fi_drop;
1273 u8 state;
1274
1275 nlflags |= NLM_F_REPLACE;
1276 fa = fa_first;
1277 if (fa_match) {
1278 if (fa == fa_match)
1279 err = 0;
1280 goto out;
1281 }
1282 err = -ENOBUFS;
1283 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1284 if (!new_fa)
1285 goto out;
1286
1287 fi_drop = fa->fa_info;
1288 new_fa->fa_tos = fa->fa_tos;
1289 new_fa->fa_info = fi;
1290 new_fa->fa_type = cfg->fc_type;
1291 state = fa->fa_state;
1292 new_fa->fa_state = state & ~FA_S_ACCESSED;
1293 new_fa->fa_slen = fa->fa_slen;
1294 new_fa->tb_id = tb->tb_id;
1295 new_fa->fa_default = -1;
1296
1297 call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1298 key, plen, fi,
1299 new_fa->fa_tos, cfg->fc_type,
1300 tb->tb_id);
1301 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
1302 tb->tb_id, &cfg->fc_nlinfo, nlflags);
1303
1304 hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
1305
1306 alias_free_mem_rcu(fa);
1307
1308 fib_release_info(fi_drop);
1309 if (state & FA_S_ACCESSED)
1310 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1311
1312 goto succeeded;
1313 }
1314 /* Error if we find a perfect match which
1315 * uses the same scope, type, and nexthop
1316 * information.
1317 */
1318 if (fa_match)
1319 goto out;
1320
1321 if (cfg->fc_nlflags & NLM_F_APPEND) {
1322 event = FIB_EVENT_ENTRY_APPEND;
1323 nlflags |= NLM_F_APPEND;
1324 } else {
1325 fa = fa_first;
1326 }
1327 }
1328 err = -ENOENT;
1329 if (!(cfg->fc_nlflags & NLM_F_CREATE))
1330 goto out;
1331
1332 nlflags |= NLM_F_CREATE;
1333 err = -ENOBUFS;
1334 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1335 if (!new_fa)
1336 goto out;
1337
1338 new_fa->fa_info = fi;
1339 new_fa->fa_tos = tos;
1340 new_fa->fa_type = cfg->fc_type;
1341 new_fa->fa_state = 0;
1342 new_fa->fa_slen = slen;
1343 new_fa->tb_id = tb->tb_id;
1344 new_fa->fa_default = -1;
1345
1346 /* Insert new entry to the list. */
1347 err = fib_insert_alias(t, tp, l, new_fa, fa, key);
1348 if (err)
1349 goto out_free_new_fa;
1350
1351 if (!plen)
1352 tb->tb_num_default++;
1353
1354 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1355 call_fib_entry_notifiers(net, event, key, plen, fi, tos, cfg->fc_type,
1356 tb->tb_id);
1357 rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
1358 &cfg->fc_nlinfo, nlflags);
1359 succeeded:
1360 return 0;
1361
1362 out_free_new_fa:
1363 kmem_cache_free(fn_alias_kmem, new_fa);
1364 out:
1365 fib_release_info(fi);
1366 err:
1367 return err;
1368 }
1369
1370 static inline t_key prefix_mismatch(t_key key, struct key_vector *n)
1371 {
1372 t_key prefix = n->key;
1373
1374 return (key ^ prefix) & (prefix | -prefix);
1375 }
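/* Editorial worked example: prefix | -prefix keeps every bit from the lowest
 * set bit of the prefix upwards.  For prefix 0xc0a80000 (192.168.0.0) the
 * lowest set bit is bit 19, so prefix_mismatch() is non-zero exactly when the
 * key differs from the prefix somewhere in bits 19..31.  For the all-zero
 * prefix the mask is 0 and the default-route leaf can never mismatch.
 */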
1376
1377 /* should be called with rcu_read_lock */
1378 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
1379 struct fib_result *res, int fib_flags)
1380 {
1381 struct trie *t = (struct trie *) tb->tb_data;
1382 #ifdef CONFIG_IP_FIB_TRIE_STATS
1383 struct trie_use_stats __percpu *stats = t->stats;
1384 #endif
1385 const t_key key = ntohl(flp->daddr);
1386 struct key_vector *n, *pn;
1387 struct fib_alias *fa;
1388 unsigned long index;
1389 t_key cindex;
1390
1391 trace_fib_table_lookup(tb->tb_id, flp);
1392
1393 pn = t->kv;
1394 cindex = 0;
1395
1396 n = get_child_rcu(pn, cindex);
1397 if (!n)
1398 return -EAGAIN;
1399
1400 #ifdef CONFIG_IP_FIB_TRIE_STATS
1401 this_cpu_inc(stats->gets);
1402 #endif
1403
1404 /* Step 1: Travel to the longest prefix match in the trie */
1405 for (;;) {
1406 index = get_cindex(key, n);
1407
1408 /* This bit of code is a bit tricky but it combines multiple
1409 * checks into a single check. The prefix consists of the
1410 * prefix plus zeros for the "bits" in the prefix. The index
1411 * is the difference between the key and this value. From
1412 * this we can actually derive several pieces of data.
1413 * if (index >= (1ul << bits))
1414 * we have a mismatch in skip bits and failed
1415 * else
1416 * we know the value is cindex
1417 *
1418 * This check is safe even if bits == KEYLENGTH due to the
1419 * fact that we can only allocate a node with 32 bits if a
1420 * long is greater than 32 bits.
1421 */
1422 if (index >= (1ul << n->bits))
1423 break;
1424
1425 /* we have found a leaf. Prefixes have already been compared */
1426 if (IS_LEAF(n))
1427 goto found;
1428
1429 /* only record pn and cindex if we are going to be chopping
1430 * bits later. Otherwise we are just wasting cycles.
1431 */
1432 if (n->slen > n->pos) {
1433 pn = n;
1434 cindex = index;
1435 }
1436
1437 n = get_child_rcu(n, index);
1438 if (unlikely(!n))
1439 goto backtrace;
1440 }
1441
1442 /* Step 2: Sort out leaves and begin backtracing for longest prefix */
1443 for (;;) {
1444 /* record the pointer where our next node pointer is stored */
1445 struct key_vector __rcu **cptr = n->tnode;
1446
1447 /* This test verifies that none of the bits that differ
1448 * between the key and the prefix exist in the region of
1449 * the lsb and higher in the prefix.
1450 */
1451 if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos))
1452 goto backtrace;
1453
1454 /* exit out and process leaf */
1455 if (unlikely(IS_LEAF(n)))
1456 break;
1457
1458 /* Don't bother recording parent info. Since we are in
1459 * prefix match mode we will have to come back to wherever
1460 * we started this traversal anyway
1461 */
1462
1463 while ((n = rcu_dereference(*cptr)) == NULL) {
1464 backtrace:
1465 #ifdef CONFIG_IP_FIB_TRIE_STATS
1466 if (!n)
1467 this_cpu_inc(stats->null_node_hit);
1468 #endif
1469 /* If we are at cindex 0 there are no more bits for
1470 * us to strip at this level so we must ascend back
1471 * up one level to see if there are any more bits to
1472 * be stripped there.
1473 */
1474 while (!cindex) {
1475 t_key pkey = pn->key;
1476
1477 /* If we don't have a parent then there is
1478 * nothing for us to do as we do not have any
1479 * further nodes to parse.
1480 */
1481 if (IS_TRIE(pn))
1482 return -EAGAIN;
1483 #ifdef CONFIG_IP_FIB_TRIE_STATS
1484 this_cpu_inc(stats->backtrack);
1485 #endif
1486 /* Get Child's index */
1487 pn = node_parent_rcu(pn);
1488 cindex = get_index(pkey, pn);
1489 }
1490
1491 /* strip the least significant bit from the cindex */
1492 cindex &= cindex - 1;
1493
1494 /* grab pointer for next child node */
1495 cptr = &pn->tnode[cindex];
1496 }
1497 }
1498
1499 found:
1500 /* this line carries forward the xor from earlier in the function */
1501 index = key ^ n->key;
1502
1503 /* Step 3: Process the leaf, if that fails fall back to backtracing */
1504 hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
1505 struct fib_info *fi = fa->fa_info;
1506 int nhsel, err;
1507
1508 if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
1509 if (index >= (1ul << fa->fa_slen))
1510 continue;
1511 }
1512 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1513 continue;
1514 if (fi->fib_dead)
1515 continue;
1516 if (fa->fa_info->fib_scope < flp->flowi4_scope)
1517 continue;
1518 fib_alias_accessed(fa);
1519 err = fib_props[fa->fa_type].error;
1520 if (unlikely(err < 0)) {
1521 #ifdef CONFIG_IP_FIB_TRIE_STATS
1522 this_cpu_inc(stats->semantic_match_passed);
1523 #endif
1524 return err;
1525 }
1526 if (fi->fib_flags & RTNH_F_DEAD)
1527 continue;
1528 for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
1529 const struct fib_nh *nh = &fi->fib_nh[nhsel];
1530 struct in_device *in_dev = __in_dev_get_rcu(nh->nh_dev);
1531
1532 if (nh->nh_flags & RTNH_F_DEAD)
1533 continue;
1534 if (in_dev &&
1535 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
1536 nh->nh_flags & RTNH_F_LINKDOWN &&
1537 !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
1538 continue;
1539 if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) {
1540 if (flp->flowi4_oif &&
1541 flp->flowi4_oif != nh->nh_oif)
1542 continue;
1543 }
1544
1545 if (!(fib_flags & FIB_LOOKUP_NOREF))
1546 atomic_inc(&fi->fib_clntref);
1547
1548 res->prefixlen = KEYLENGTH - fa->fa_slen;
1549 res->nh_sel = nhsel;
1550 res->type = fa->fa_type;
1551 res->scope = fi->fib_scope;
1552 res->fi = fi;
1553 res->table = tb;
1554 res->fa_head = &n->leaf;
1555 #ifdef CONFIG_IP_FIB_TRIE_STATS
1556 this_cpu_inc(stats->semantic_match_passed);
1557 #endif
1558 trace_fib_table_lookup_nh(nh);
1559
1560 return err;
1561 }
1562 }
1563 #ifdef CONFIG_IP_FIB_TRIE_STATS
1564 this_cpu_inc(stats->semantic_match_miss);
1565 #endif
1566 goto backtrace;
1567 }
1568 EXPORT_SYMBOL_GPL(fib_table_lookup);
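/* Editorial sketch, not part of the original file: how a caller typically
 * drives fib_table_lookup().  The table pointer is assumed to come from
 * fib_get_table() or similar; my_lookup() is a hypothetical name.
 */
#if 0	/* illustration only */
static int my_lookup(struct fib_table *tb, __be32 daddr)
{
	struct fib_result res;
	struct flowi4 fl4 = {
		.daddr = daddr,
		/* flowi4_tos, flowi4_scope and flowi4_oif further narrow
		 * the match, see the semantic checks in step 3 above
		 */
	};
	int err;

	rcu_read_lock();
	/* FIB_LOOKUP_NOREF skips taking a reference on res.fi, so the
	 * result is only valid while the RCU read lock is held
	 */
	err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
	rcu_read_unlock();

	/* 0 on success, a negative error otherwise (-EAGAIN: no match) */
	return err;
}
#endif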
1569
1570 static void fib_remove_alias(struct trie *t, struct key_vector *tp,
1571 struct key_vector *l, struct fib_alias *old)
1572 {
1573 /* record the location of the previous list_info entry */
1574 struct hlist_node **pprev = old->fa_list.pprev;
1575 struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next);
1576
1577 /* remove the fib_alias from the list */
1578 hlist_del_rcu(&old->fa_list);
1579
1580 /* if we emptied the list this leaf will be freed and we can sort
1581 * out parent suffix lengths as a part of trie_rebalance
1582 */
1583 if (hlist_empty(&l->leaf)) {
1584 if (tp->slen == l->slen)
1585 node_pull_suffix(tp, tp->pos);
1586 put_child_root(tp, l->key, NULL);
1587 node_free(l);
1588 trie_rebalance(t, tp);
1589 return;
1590 }
1591
1592 /* only access fa if it is pointing at the last valid hlist_node */
1593 if (*pprev)
1594 return;
1595
1596 /* update the trie with the latest suffix length */
1597 l->slen = fa->fa_slen;
1598 node_pull_suffix(tp, fa->fa_slen);
1599 }
1600
1601 /* Caller must hold RTNL. */
1602 int fib_table_delete(struct net *net, struct fib_table *tb,
1603 struct fib_config *cfg)
1604 {
1605 struct trie *t = (struct trie *) tb->tb_data;
1606 struct fib_alias *fa, *fa_to_delete;
1607 struct key_vector *l, *tp;
1608 u8 plen = cfg->fc_dst_len;
1609 u8 slen = KEYLENGTH - plen;
1610 u8 tos = cfg->fc_tos;
1611 u32 key;
1612
1613 if (plen > KEYLENGTH)
1614 return -EINVAL;
1615
1616 key = ntohl(cfg->fc_dst);
1617
1618 if ((plen < KEYLENGTH) && (key << plen))
1619 return -EINVAL;
1620
1621 l = fib_find_node(t, &tp, key);
1622 if (!l)
1623 return -ESRCH;
1624
1625 fa = fib_find_alias(&l->leaf, slen, tos, 0, tb->tb_id);
1626 if (!fa)
1627 return -ESRCH;
1628
1629 pr_debug("Deleting %08x/%d tos=%d t=%p\n", key, plen, tos, t);
1630
1631 fa_to_delete = NULL;
1632 hlist_for_each_entry_from(fa, fa_list) {
1633 struct fib_info *fi = fa->fa_info;
1634
1635 if ((fa->fa_slen != slen) ||
1636 (fa->tb_id != tb->tb_id) ||
1637 (fa->fa_tos != tos))
1638 break;
1639
1640 if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) &&
1641 (cfg->fc_scope == RT_SCOPE_NOWHERE ||
1642 fa->fa_info->fib_scope == cfg->fc_scope) &&
1643 (!cfg->fc_prefsrc ||
1644 fi->fib_prefsrc == cfg->fc_prefsrc) &&
1645 (!cfg->fc_protocol ||
1646 fi->fib_protocol == cfg->fc_protocol) &&
1647 fib_nh_match(cfg, fi) == 0) {
1648 fa_to_delete = fa;
1649 break;
1650 }
1651 }
1652
1653 if (!fa_to_delete)
1654 return -ESRCH;
1655
1656 call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, key, plen,
1657 fa_to_delete->fa_info, tos,
1658 fa_to_delete->fa_type, tb->tb_id);
1659 rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
1660 &cfg->fc_nlinfo, 0);
1661
1662 if (!plen)
1663 tb->tb_num_default--;
1664
1665 fib_remove_alias(t, tp, l, fa_to_delete);
1666
1667 if (fa_to_delete->fa_state & FA_S_ACCESSED)
1668 rt_cache_flush(cfg->fc_nlinfo.nl_net);
1669
1670 fib_release_info(fa_to_delete->fa_info);
1671 alias_free_mem_rcu(fa_to_delete);
1672 return 0;
1673 }
1674
1675 /* Scan for the next leaf starting at the provided key value */
1676 static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
1677 {
1678 struct key_vector *pn, *n = *tn;
1679 unsigned long cindex;
1680
1681 /* this loop is meant to try and find the key in the trie */
1682 do {
1683 /* record parent and next child index */
1684 pn = n;
1685 cindex = (key > pn->key) ? get_index(key, pn) : 0;
1686
1687 if (cindex >> pn->bits)
1688 break;
1689
1690 /* descend into the next child */
1691 n = get_child_rcu(pn, cindex++);
1692 if (!n)
1693 break;
1694
1695 /* guarantee forward progress on the keys */
1696 if (IS_LEAF(n) && (n->key >= key))
1697 goto found;
1698 } while (IS_TNODE(n));
1699
1700 /* this loop will search for the next leaf with a greater key */
1701 while (!IS_TRIE(pn)) {
1702 /* if we exhausted the parent node we will need to climb */
1703 if (cindex >= (1ul << pn->bits)) {
1704 t_key pkey = pn->key;
1705
1706 pn = node_parent_rcu(pn);
1707 cindex = get_index(pkey, pn) + 1;
1708 continue;
1709 }
1710
1711 /* grab the next available node */
1712 n = get_child_rcu(pn, cindex++);
1713 if (!n)
1714 continue;
1715
1716 /* no need to compare keys since we bumped the index */
1717 if (IS_LEAF(n))
1718 goto found;
1719
1720 /* restart the scan in the new node */
1721 pn = n;
1722 cindex = 0;
1723 }
1724
1725 *tn = pn;
1726 return NULL; /* Root of trie */
1727 found:
1728 /* if we are at the limit for keys just return NULL for the tnode */
1729 *tn = pn;
1730 return n;
1731 }
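/* Editorial note: callers walk every leaf with the pattern used by
 * fib_trie_unmerge() and fib_table_notify() further down:
 *
 *	t_key key = 0;
 *	while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
 *		... visit l ...
 *		key = l->key + 1;
 *		if (key < l->key)	( wrapped past KEY_MAX, we are done )
 *			break;
 *	}
 */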
1732
1733 static void fib_trie_free(struct fib_table *tb)
1734 {
1735 struct trie *t = (struct trie *)tb->tb_data;
1736 struct key_vector *pn = t->kv;
1737 unsigned long cindex = 1;
1738 struct hlist_node *tmp;
1739 struct fib_alias *fa;
1740
1741 /* walk trie in reverse order and free everything */
1742 for (;;) {
1743 struct key_vector *n;
1744
1745 if (!(cindex--)) {
1746 t_key pkey = pn->key;
1747
1748 if (IS_TRIE(pn))
1749 break;
1750
1751 n = pn;
1752 pn = node_parent(pn);
1753
1754 /* drop emptied tnode */
1755 put_child_root(pn, n->key, NULL);
1756 node_free(n);
1757
1758 cindex = get_index(pkey, pn);
1759
1760 continue;
1761 }
1762
1763 /* grab the next available node */
1764 n = get_child(pn, cindex);
1765 if (!n)
1766 continue;
1767
1768 if (IS_TNODE(n)) {
1769 /* record pn and cindex for leaf walking */
1770 pn = n;
1771 cindex = 1ul << n->bits;
1772
1773 continue;
1774 }
1775
1776 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1777 hlist_del_rcu(&fa->fa_list);
1778 alias_free_mem_rcu(fa);
1779 }
1780
1781 put_child_root(pn, n->key, NULL);
1782 node_free(n);
1783 }
1784
1785 #ifdef CONFIG_IP_FIB_TRIE_STATS
1786 free_percpu(t->stats);
1787 #endif
1788 kfree(tb);
1789 }
1790
1791 struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
1792 {
1793 struct trie *ot = (struct trie *)oldtb->tb_data;
1794 struct key_vector *l, *tp = ot->kv;
1795 struct fib_table *local_tb;
1796 struct fib_alias *fa;
1797 struct trie *lt;
1798 t_key key = 0;
1799
1800 if (oldtb->tb_data == oldtb->__data)
1801 return oldtb;
1802
1803 local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL);
1804 if (!local_tb)
1805 return NULL;
1806
1807 lt = (struct trie *)local_tb->tb_data;
1808
1809 while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
1810 struct key_vector *local_l = NULL, *local_tp;
1811
1812 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
1813 struct fib_alias *new_fa;
1814
1815 if (local_tb->tb_id != fa->tb_id)
1816 continue;
1817
1818 /* clone fa for new local table */
1819 new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
1820 if (!new_fa)
1821 goto out;
1822
1823 memcpy(new_fa, fa, sizeof(*fa));
1824
1825 /* insert clone into table */
1826 if (!local_l)
1827 local_l = fib_find_node(lt, &local_tp, l->key);
1828
1829 if (fib_insert_alias(lt, local_tp, local_l, new_fa,
1830 NULL, l->key)) {
1831 kmem_cache_free(fn_alias_kmem, new_fa);
1832 goto out;
1833 }
1834 }
1835
1836 /* stop loop if key wrapped back to 0 */
1837 key = l->key + 1;
1838 if (key < l->key)
1839 break;
1840 }
1841
1842 return local_tb;
1843 out:
1844 fib_trie_free(local_tb);
1845
1846 return NULL;
1847 }
1848
1849 /* Caller must hold RTNL */
1850 void fib_table_flush_external(struct fib_table *tb)
1851 {
1852 struct trie *t = (struct trie *)tb->tb_data;
1853 struct key_vector *pn = t->kv;
1854 unsigned long cindex = 1;
1855 struct hlist_node *tmp;
1856 struct fib_alias *fa;
1857
1858 /* walk trie in reverse order */
1859 for (;;) {
1860 unsigned char slen = 0;
1861 struct key_vector *n;
1862
1863 if (!(cindex--)) {
1864 t_key pkey = pn->key;
1865
1866 /* cannot resize the trie vector */
1867 if (IS_TRIE(pn))
1868 break;
1869
1870 /* update the suffix to address pulled leaves */
1871 if (pn->slen > pn->pos)
1872 update_suffix(pn);
1873
1874 /* resize completed node */
1875 pn = resize(t, pn);
1876 cindex = get_index(pkey, pn);
1877
1878 continue;
1879 }
1880
1881 /* grab the next available node */
1882 n = get_child(pn, cindex);
1883 if (!n)
1884 continue;
1885
1886 if (IS_TNODE(n)) {
1887 /* record pn and cindex for leaf walking */
1888 pn = n;
1889 cindex = 1ul << n->bits;
1890
1891 continue;
1892 }
1893
1894 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1895 /* if alias was cloned to local then we just
1896 * need to remove the local copy from main
1897 */
1898 if (tb->tb_id != fa->tb_id) {
1899 hlist_del_rcu(&fa->fa_list);
1900 alias_free_mem_rcu(fa);
1901 continue;
1902 }
1903
1904 /* record local slen */
1905 slen = fa->fa_slen;
1906 }
1907
1908 /* update leaf slen */
1909 n->slen = slen;
1910
1911 if (hlist_empty(&n->leaf)) {
1912 put_child_root(pn, n->key, NULL);
1913 node_free(n);
1914 }
1915 }
1916 }
1917
1918 /* Caller must hold RTNL. */
1919 int fib_table_flush(struct net *net, struct fib_table *tb)
1920 {
1921 struct trie *t = (struct trie *)tb->tb_data;
1922 struct key_vector *pn = t->kv;
1923 unsigned long cindex = 1;
1924 struct hlist_node *tmp;
1925 struct fib_alias *fa;
1926 int found = 0;
1927
1928 /* walk trie in reverse order */
1929 for (;;) {
1930 unsigned char slen = 0;
1931 struct key_vector *n;
1932
1933 if (!(cindex--)) {
1934 t_key pkey = pn->key;
1935
1936 /* cannot resize the trie vector */
1937 if (IS_TRIE(pn))
1938 break;
1939
1940 /* update the suffix to address pulled leaves */
1941 if (pn->slen > pn->pos)
1942 update_suffix(pn);
1943
1944 /* resize completed node */
1945 pn = resize(t, pn);
1946 cindex = get_index(pkey, pn);
1947
1948 continue;
1949 }
1950
1951 /* grab the next available node */
1952 n = get_child(pn, cindex);
1953 if (!n)
1954 continue;
1955
1956 if (IS_TNODE(n)) {
1957 /* record pn and cindex for leaf walking */
1958 pn = n;
1959 cindex = 1ul << n->bits;
1960
1961 continue;
1962 }
1963
1964 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1965 struct fib_info *fi = fa->fa_info;
1966
1967 if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
1968 tb->tb_id != fa->tb_id) {
1969 slen = fa->fa_slen;
1970 continue;
1971 }
1972
1973 call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL,
1974 n->key,
1975 KEYLENGTH - fa->fa_slen,
1976 fi, fa->fa_tos, fa->fa_type,
1977 tb->tb_id);
1978 hlist_del_rcu(&fa->fa_list);
1979 fib_release_info(fa->fa_info);
1980 alias_free_mem_rcu(fa);
1981 found++;
1982 }
1983
1984 /* update leaf slen */
1985 n->slen = slen;
1986
1987 if (hlist_empty(&n->leaf)) {
1988 put_child_root(pn, n->key, NULL);
1989 node_free(n);
1990 }
1991 }
1992
1993 pr_debug("trie_flush found=%d\n", found);
1994 return found;
1995 }
1996
1997 static void fib_leaf_notify(struct net *net, struct key_vector *l,
1998 struct fib_table *tb, struct notifier_block *nb,
1999 enum fib_event_type event_type)
2000 {
2001 struct fib_alias *fa;
2002
2003 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
2004 struct fib_info *fi = fa->fa_info;
2005
2006 if (!fi)
2007 continue;
2008
2009 /* local and main table can share the same trie,
2010 * so don't notify twice for the same entry.
2011 */
2012 if (tb->tb_id != fa->tb_id)
2013 continue;
2014
2015 call_fib_entry_notifier(nb, net, event_type, l->key,
2016 KEYLENGTH - fa->fa_slen, fi, fa->fa_tos,
2017 fa->fa_type, fa->tb_id);
2018 }
2019 }
2020
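/* Walk every leaf of @tb's trie and replay its entries via
 * fib_leaf_notify().
 */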
2021 static void fib_table_notify(struct net *net, struct fib_table *tb,
2022 struct notifier_block *nb,
2023 enum fib_event_type event_type)
2024 {
2025 struct trie *t = (struct trie *)tb->tb_data;
2026 struct key_vector *l, *tp = t->kv;
2027 t_key key = 0;
2028
2029 while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
2030 fib_leaf_notify(net, l, tb, nb, event_type);
2031
2032 key = l->key + 1;
2033 /* stop in case of wrap around */
2034 if (key < l->key)
2035 break;
2036 }
2037 }
2038
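/* Replay the entries of every FIB table in @net to the notifier @nb
 * for the given event type.
 */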
2039 static void fib_notify(struct net *net, struct notifier_block *nb,
2040 enum fib_event_type event_type)
2041 {
2042 unsigned int h;
2043
2044 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2045 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2046 struct fib_table *tb;
2047
2048 hlist_for_each_entry_rcu(tb, head, tb_hlist)
2049 fib_table_notify(net, tb, nb, event_type);
2050 }
2051 }
2052
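/* RCU callback that frees a table once readers are done.  The per-cpu
 * stats are only released when the trie is embedded in this table
 * (tb_data points at __data) rather than borrowed from an alias table.
 */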
2053 static void __trie_free_rcu(struct rcu_head *head)
2054 {
2055 struct fib_table *tb = container_of(head, struct fib_table, rcu);
2056 #ifdef CONFIG_IP_FIB_TRIE_STATS
2057 struct trie *t = (struct trie *)tb->tb_data;
2058
2059 if (tb->tb_data == tb->__data)
2060 free_percpu(t->stats);
2061 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2062 kfree(tb);
2063 }
2064
2065 void fib_free_table(struct fib_table *tb)
2066 {
2067 call_rcu(&tb->rcu, __trie_free_rcu);
2068 }
2069
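/* Dump the aliases of a single leaf into the netlink skb, resuming at
 * the alias index saved in cb->args[4] and returning -1 if the dump
 * has to stop early.
 */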
2070 static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
2071 struct sk_buff *skb, struct netlink_callback *cb)
2072 {
2073 __be32 xkey = htonl(l->key);
2074 struct fib_alias *fa;
2075 int i, s_i;
2076
2077 s_i = cb->args[4];
2078 i = 0;
2079
2080 /* rcu_read_lock is held by caller */
2081 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
2082 if (i < s_i) {
2083 i++;
2084 continue;
2085 }
2086
2087 if (tb->tb_id != fa->tb_id) {
2088 i++;
2089 continue;
2090 }
2091
2092 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
2093 cb->nlh->nlmsg_seq,
2094 RTM_NEWROUTE,
2095 tb->tb_id,
2096 fa->fa_type,
2097 xkey,
2098 KEYLENGTH - fa->fa_slen,
2099 fa->fa_tos,
2100 fa->fa_info, NLM_F_MULTI) < 0) {
2101 cb->args[4] = i;
2102 return -1;
2103 }
2104 i++;
2105 }
2106
2107 cb->args[4] = i;
2108 return skb->len;
2109 }
2110
2111 /* rcu_read_lock needs to be held by the caller on the read side */
2112 int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
2113 struct netlink_callback *cb)
2114 {
2115 struct trie *t = (struct trie *)tb->tb_data;
2116 struct key_vector *l, *tp = t->kv;
2117 /* Dump starting at last key.
2118 * Note: 0.0.0.0/0 (i.e. the default route) is the first key.
2119 */
2120 int count = cb->args[2];
2121 t_key key = cb->args[3];
2122
2123 while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
2124 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
2125 cb->args[3] = key;
2126 cb->args[2] = count;
2127 return -1;
2128 }
2129
2130 ++count;
2131 key = l->key + 1;
2132
2133 memset(&cb->args[4], 0,
2134 sizeof(cb->args) - 4*sizeof(cb->args[0]));
2135
2136 /* stop loop if key wrapped back to 0 */
2137 if (key < l->key)
2138 break;
2139 }
2140
2141 cb->args[3] = key;
2142 cb->args[2] = count;
2143
2144 return skb->len;
2145 }
2146
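/* Create the slab caches used for fib aliases and trie leaves;
 * SLAB_PANIC makes boot fail if either cache cannot be set up.
 */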
2147 void __init fib_trie_init(void)
2148 {
2149 fn_alias_kmem = kmem_cache_create("ip_fib_alias",
2150 sizeof(struct fib_alias),
2151 0, SLAB_PANIC, NULL);
2152
2153 trie_leaf_kmem = kmem_cache_create("ip_fib_trie",
2154 LEAF_SIZE,
2155 0, SLAB_PANIC, NULL);
2156 }
2157
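/* Allocate a new table.  Without an @alias the trie is embedded right
 * behind the table header; with an @alias the new table reuses the
 * alias's trie through tb_data and no trie of its own is allocated.
 */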
2158 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
2159 {
2160 struct fib_table *tb;
2161 struct trie *t;
2162 size_t sz = sizeof(*tb);
2163
2164 if (!alias)
2165 sz += sizeof(struct trie);
2166
2167 tb = kzalloc(sz, GFP_KERNEL);
2168 if (!tb)
2169 return NULL;
2170
2171 tb->tb_id = id;
2172 tb->tb_num_default = 0;
2173 tb->tb_data = (alias ? alias->__data : tb->__data);
2174
2175 if (alias)
2176 return tb;
2177
2178 t = (struct trie *) tb->tb_data;
2179 t->kv[0].pos = KEYLENGTH;
2180 t->kv[0].slen = KEYLENGTH;
2181 #ifdef CONFIG_IP_FIB_TRIE_STATS
2182 t->stats = alloc_percpu(struct trie_use_stats);
2183 if (!t->stats) {
2184 kfree(tb);
2185 tb = NULL;
2186 }
2187 #endif
2188
2189 return tb;
2190 }
2191
2192 #ifdef CONFIG_PROC_FS
2193 /* Depth-first trie walk iterator */
2194 struct fib_trie_iter {
2195 struct seq_net_private p;
2196 struct fib_table *tb;
2197 struct key_vector *tnode;
2198 unsigned int index;
2199 unsigned int depth;
2200 };
2201
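/* Advance the depth-first walk: return the next child (descending into
 * tnodes as they are found) and pop back to the parent once the current
 * node's children are exhausted.  Returns NULL when the walk is done.
 */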
2202 static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter)
2203 {
2204 unsigned long cindex = iter->index;
2205 struct key_vector *pn = iter->tnode;
2206 t_key pkey;
2207
2208 pr_debug("get_next iter={node=%p index=%d depth=%d}\n",
2209 iter->tnode, iter->index, iter->depth);
2210
2211 while (!IS_TRIE(pn)) {
2212 while (cindex < child_length(pn)) {
2213 struct key_vector *n = get_child_rcu(pn, cindex++);
2214
2215 if (!n)
2216 continue;
2217
2218 if (IS_LEAF(n)) {
2219 iter->tnode = pn;
2220 iter->index = cindex;
2221 } else {
2222 /* push down one level */
2223 iter->tnode = n;
2224 iter->index = 0;
2225 ++iter->depth;
2226 }
2227
2228 return n;
2229 }
2230
2231 /* Current node exhausted, pop back up */
2232 pkey = pn->key;
2233 pn = node_parent_rcu(pn);
2234 cindex = get_index(pkey, pn) + 1;
2235 --iter->depth;
2236 }
2237
2238 /* record root node so further searches know we are done */
2239 iter->tnode = pn;
2240 iter->index = 0;
2241
2242 return NULL;
2243 }
2244
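/* Position the iterator on the first node below the trie root, or
 * return NULL for an empty trie.
 */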
2245 static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter,
2246 struct trie *t)
2247 {
2248 struct key_vector *n, *pn;
2249
2250 if (!t)
2251 return NULL;
2252
2253 pn = t->kv;
2254 n = rcu_dereference(pn->tnode[0]);
2255 if (!n)
2256 return NULL;
2257
2258 if (IS_TNODE(n)) {
2259 iter->tnode = n;
2260 iter->index = 0;
2261 iter->depth = 1;
2262 } else {
2263 iter->tnode = pn;
2264 iter->index = 0;
2265 iter->depth = 0;
2266 }
2267
2268 return n;
2269 }
2270
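/* Walk the whole trie under RCU and accumulate the depth, prefix and
 * node-size counters reported by /proc/net/fib_triestat.
 */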
2271 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
2272 {
2273 struct key_vector *n;
2274 struct fib_trie_iter iter;
2275
2276 memset(s, 0, sizeof(*s));
2277
2278 rcu_read_lock();
2279 for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) {
2280 if (IS_LEAF(n)) {
2281 struct fib_alias *fa;
2282
2283 s->leaves++;
2284 s->totdepth += iter.depth;
2285 if (iter.depth > s->maxdepth)
2286 s->maxdepth = iter.depth;
2287
2288 hlist_for_each_entry_rcu(fa, &n->leaf, fa_list)
2289 ++s->prefixes;
2290 } else {
2291 s->tnodes++;
2292 if (n->bits < MAX_STAT_DEPTH)
2293 s->nodesizes[n->bits]++;
2294 s->nullpointers += tn_info(n)->empty_children;
2295 }
2296 }
2297 rcu_read_unlock();
2298 }
2299
2300 /*
2301 * This outputs /proc/net/fib_triestat
2302 */
2303 static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
2304 {
2305 unsigned int i, max, pointers, bytes, avdepth;
2306
2307 if (stat->leaves)
2308 avdepth = stat->totdepth*100 / stat->leaves;
2309 else
2310 avdepth = 0;
2311
2312 seq_printf(seq, "\tAver depth: %u.%02d\n",
2313 avdepth / 100, avdepth % 100);
2314 seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth);
2315
2316 seq_printf(seq, "\tLeaves: %u\n", stat->leaves);
2317 bytes = LEAF_SIZE * stat->leaves;
2318
2319 seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes);
2320 bytes += sizeof(struct fib_alias) * stat->prefixes;
2321
2322 seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes);
2323 bytes += TNODE_SIZE(0) * stat->tnodes;
2324
2325 max = MAX_STAT_DEPTH;
2326 while (max > 0 && stat->nodesizes[max-1] == 0)
2327 max--;
2328
2329 pointers = 0;
2330 for (i = 1; i < max; i++)
2331 if (stat->nodesizes[i] != 0) {
2332 seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
2333 pointers += (1<<i) * stat->nodesizes[i];
2334 }
2335 seq_putc(seq, '\n');
2336 seq_printf(seq, "\tPointers: %u\n", pointers);
2337
2338 bytes += sizeof(struct key_vector *) * pointers;
2339 seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
2340 seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
2341 }
2342
2343 #ifdef CONFIG_IP_FIB_TRIE_STATS
2344 static void trie_show_usage(struct seq_file *seq,
2345 const struct trie_use_stats __percpu *stats)
2346 {
2347 struct trie_use_stats s = { 0 };
2348 int cpu;
2349
2350 /* loop through all of the CPUs and gather up the stats */
2351 for_each_possible_cpu(cpu) {
2352 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu);
2353
2354 s.gets += pcpu->gets;
2355 s.backtrack += pcpu->backtrack;
2356 s.semantic_match_passed += pcpu->semantic_match_passed;
2357 s.semantic_match_miss += pcpu->semantic_match_miss;
2358 s.null_node_hit += pcpu->null_node_hit;
2359 s.resize_node_skipped += pcpu->resize_node_skipped;
2360 }
2361
2362 seq_printf(seq, "\nCounters:\n---------\n");
2363 seq_printf(seq, "gets = %u\n", s.gets);
2364 seq_printf(seq, "backtracks = %u\n", s.backtrack);
2365 seq_printf(seq, "semantic match passed = %u\n",
2366 s.semantic_match_passed);
2367 seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss);
2368 seq_printf(seq, "null node hit= %u\n", s.null_node_hit);
2369 seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped);
2370 }
2371 #endif /* CONFIG_IP_FIB_TRIE_STATS */
2372
2373 static void fib_table_print(struct seq_file *seq, struct fib_table *tb)
2374 {
2375 if (tb->tb_id == RT_TABLE_LOCAL)
2376 seq_puts(seq, "Local:\n");
2377 else if (tb->tb_id == RT_TABLE_MAIN)
2378 seq_puts(seq, "Main:\n");
2379 else
2380 seq_printf(seq, "Id %d:\n", tb->tb_id);
2381 }
2382
2383
2384 static int fib_triestat_seq_show(struct seq_file *seq, void *v)
2385 {
2386 struct net *net = (struct net *)seq->private;
2387 unsigned int h;
2388
2389 seq_printf(seq,
2390 "Basic info: size of leaf:"
2391 " %zd bytes, size of tnode: %zd bytes.\n",
2392 LEAF_SIZE, TNODE_SIZE(0));
2393
2394 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2395 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2396 struct fib_table *tb;
2397
2398 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2399 struct trie *t = (struct trie *) tb->tb_data;
2400 struct trie_stat stat;
2401
2402 if (!t)
2403 continue;
2404
2405 fib_table_print(seq, tb);
2406
2407 trie_collect_stats(t, &stat);
2408 trie_show_stats(seq, &stat);
2409 #ifdef CONFIG_IP_FIB_TRIE_STATS
2410 trie_show_usage(seq, t->stats);
2411 #endif
2412 }
2413 }
2414
2415 return 0;
2416 }
2417
2418 static int fib_triestat_seq_open(struct inode *inode, struct file *file)
2419 {
2420 return single_open_net(inode, file, fib_triestat_seq_show);
2421 }
2422
2423 static const struct file_operations fib_triestat_fops = {
2424 .owner = THIS_MODULE,
2425 .open = fib_triestat_seq_open,
2426 .read = seq_read,
2427 .llseek = seq_lseek,
2428 .release = single_release_net,
2429 };
2430
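/* Find the node at position @pos across all tables for the
 * /proc/net/fib_trie walk and remember its table in the iterator.
 */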
2431 static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
2432 {
2433 struct fib_trie_iter *iter = seq->private;
2434 struct net *net = seq_file_net(seq);
2435 loff_t idx = 0;
2436 unsigned int h;
2437
2438 for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
2439 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2440 struct fib_table *tb;
2441
2442 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2443 struct key_vector *n;
2444
2445 for (n = fib_trie_get_first(iter,
2446 (struct trie *) tb->tb_data);
2447 n; n = fib_trie_get_next(iter))
2448 if (pos == idx++) {
2449 iter->tb = tb;
2450 return n;
2451 }
2452 }
2453 }
2454
2455 return NULL;
2456 }
2457
2458 static void *fib_trie_seq_start(struct seq_file *seq, loff_t *pos)
2459 __acquires(RCU)
2460 {
2461 rcu_read_lock();
2462 return fib_trie_get_idx(seq, *pos);
2463 }
2464
2465 static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2466 {
2467 struct fib_trie_iter *iter = seq->private;
2468 struct net *net = seq_file_net(seq);
2469 struct fib_table *tb = iter->tb;
2470 struct hlist_node *tb_node;
2471 unsigned int h;
2472 struct key_vector *n;
2473
2474 ++*pos;
2475 /* next node in same table */
2476 n = fib_trie_get_next(iter);
2477 if (n)
2478 return n;
2479
2480 /* walk rest of this hash chain */
2481 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
2482 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
2483 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2484 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2485 if (n)
2486 goto found;
2487 }
2488
2489 /* new hash chain */
2490 while (++h < FIB_TABLE_HASHSZ) {
2491 struct hlist_head *head = &net->ipv4.fib_table_hash[h];
2492 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
2493 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2494 if (n)
2495 goto found;
2496 }
2497 }
2498 return NULL;
2499
2500 found:
2501 iter->tb = tb;
2502 return n;
2503 }
2504
2505 static void fib_trie_seq_stop(struct seq_file *seq, void *v)
2506 __releases(RCU)
2507 {
2508 rcu_read_unlock();
2509 }
2510
2511 static void seq_indent(struct seq_file *seq, int n)
2512 {
2513 while (n-- > 0)
2514 seq_puts(seq, " ");
2515 }
2516
2517 static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
2518 {
2519 switch (s) {
2520 case RT_SCOPE_UNIVERSE: return "universe";
2521 case RT_SCOPE_SITE: return "site";
2522 case RT_SCOPE_LINK: return "link";
2523 case RT_SCOPE_HOST: return "host";
2524 case RT_SCOPE_NOWHERE: return "nowhere";
2525 default:
2526 snprintf(buf, len, "scope=%d", s);
2527 return buf;
2528 }
2529 }
2530
2531 static const char *const rtn_type_names[__RTN_MAX] = {
2532 [RTN_UNSPEC] = "UNSPEC",
2533 [RTN_UNICAST] = "UNICAST",
2534 [RTN_LOCAL] = "LOCAL",
2535 [RTN_BROADCAST] = "BROADCAST",
2536 [RTN_ANYCAST] = "ANYCAST",
2537 [RTN_MULTICAST] = "MULTICAST",
2538 [RTN_BLACKHOLE] = "BLACKHOLE",
2539 [RTN_UNREACHABLE] = "UNREACHABLE",
2540 [RTN_PROHIBIT] = "PROHIBIT",
2541 [RTN_THROW] = "THROW",
2542 [RTN_NAT] = "NAT",
2543 [RTN_XRESOLVE] = "XRESOLVE",
2544 };
2545
2546 static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
2547 {
2548 if (t < __RTN_MAX && rtn_type_names[t])
2549 return rtn_type_names[t];
2550 snprintf(buf, len, "type %u", t);
2551 return buf;
2552 }
2553
2554 /* Pretty print the trie */
2555 static int fib_trie_seq_show(struct seq_file *seq, void *v)
2556 {
2557 const struct fib_trie_iter *iter = seq->private;
2558 struct key_vector *n = v;
2559
2560 if (IS_TRIE(node_parent_rcu(n)))
2561 fib_table_print(seq, iter->tb);
2562
2563 if (IS_TNODE(n)) {
2564 __be32 prf = htonl(n->key);
2565
2566 seq_indent(seq, iter->depth-1);
2567 seq_printf(seq, " +-- %pI4/%zu %u %u %u\n",
2568 &prf, KEYLENGTH - n->pos - n->bits, n->bits,
2569 tn_info(n)->full_children,
2570 tn_info(n)->empty_children);
2571 } else {
2572 __be32 val = htonl(n->key);
2573 struct fib_alias *fa;
2574
2575 seq_indent(seq, iter->depth);
2576 seq_printf(seq, " |-- %pI4\n", &val);
2577
2578 hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) {
2579 char buf1[32], buf2[32];
2580
2581 seq_indent(seq, iter->depth + 1);
2582 seq_printf(seq, " /%zu %s %s",
2583 KEYLENGTH - fa->fa_slen,
2584 rtn_scope(buf1, sizeof(buf1),
2585 fa->fa_info->fib_scope),
2586 rtn_type(buf2, sizeof(buf2),
2587 fa->fa_type));
2588 if (fa->fa_tos)
2589 seq_printf(seq, " tos=%d", fa->fa_tos);
2590 seq_putc(seq, '\n');
2591 }
2592 }
2593
2594 return 0;
2595 }
2596
2597 static const struct seq_operations fib_trie_seq_ops = {
2598 .start = fib_trie_seq_start,
2599 .next = fib_trie_seq_next,
2600 .stop = fib_trie_seq_stop,
2601 .show = fib_trie_seq_show,
2602 };
2603
2604 static int fib_trie_seq_open(struct inode *inode, struct file *file)
2605 {
2606 return seq_open_net(inode, file, &fib_trie_seq_ops,
2607 sizeof(struct fib_trie_iter));
2608 }
2609
2610 static const struct file_operations fib_trie_fops = {
2611 .owner = THIS_MODULE,
2612 .open = fib_trie_seq_open,
2613 .read = seq_read,
2614 .llseek = seq_lseek,
2615 .release = seq_release_net,
2616 };
2617
2618 struct fib_route_iter {
2619 struct seq_net_private p;
2620 struct fib_table *main_tb;
2621 struct key_vector *tnode;
2622 loff_t pos;
2623 t_key key;
2624 };
2625
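/* Find the leaf at position @pos for /proc/net/route, restarting from
 * the key cached on the previous read when possible instead of walking
 * the trie from the beginning.
 */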
2626 static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
2627 loff_t pos)
2628 {
2629 struct key_vector *l, **tp = &iter->tnode;
2630 t_key key;
2631
2632 /* use cached location of previously found key */
2633 if (iter->pos > 0 && pos >= iter->pos) {
2634 key = iter->key;
2635 } else {
2636 iter->pos = 1;
2637 key = 0;
2638 }
2639
2640 pos -= iter->pos;
2641
2642 while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) {
2643 key = l->key + 1;
2644 iter->pos++;
2645 l = NULL;
2646
2647 /* handle unlikely case of a key wrap */
2648 if (!key)
2649 break;
2650 }
2651
2652 if (l)
2653 iter->key = l->key; /* remember it */
2654 else
2655 iter->pos = 0; /* forget it */
2656
2657 return l;
2658 }
2659
2660 static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos)
2661 __acquires(RCU)
2662 {
2663 struct fib_route_iter *iter = seq->private;
2664 struct fib_table *tb;
2665 struct trie *t;
2666
2667 rcu_read_lock();
2668
2669 tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
2670 if (!tb)
2671 return NULL;
2672
2673 iter->main_tb = tb;
2674 t = (struct trie *)tb->tb_data;
2675 iter->tnode = t->kv;
2676
2677 if (*pos != 0)
2678 return fib_route_get_idx(iter, *pos);
2679
2680 iter->pos = 0;
2681 iter->key = KEY_MAX;
2682
2683 return SEQ_START_TOKEN;
2684 }
2685
2686 static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2687 {
2688 struct fib_route_iter *iter = seq->private;
2689 struct key_vector *l = NULL;
2690 t_key key = iter->key + 1;
2691
2692 ++*pos;
2693
2694 /* only allow key of 0 for start of sequence */
2695 if ((v == SEQ_START_TOKEN) || key)
2696 l = leaf_walk_rcu(&iter->tnode, key);
2697
2698 if (l) {
2699 iter->key = l->key;
2700 iter->pos++;
2701 } else {
2702 iter->pos = 0;
2703 }
2704
2705 return l;
2706 }
2707
2708 static void fib_route_seq_stop(struct seq_file *seq, void *v)
2709 __releases(RCU)
2710 {
2711 rcu_read_unlock();
2712 }
2713
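/* Translate route type, netmask and nexthop into the legacy RTF_* flags
 * reported in /proc/net/route.
 */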
2714 static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
2715 {
2716 unsigned int flags = 0;
2717
2718 if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
2719 flags = RTF_REJECT;
2720 if (fi && fi->fib_nh->nh_gw)
2721 flags |= RTF_GATEWAY;
2722 if (mask == htonl(0xFFFFFFFF))
2723 flags |= RTF_HOST;
2724 flags |= RTF_UP;
2725 return flags;
2726 }
2727
2728 /*
2729 * This outputs /proc/net/route.
2730 * The format of the file is not supposed to be changed
2731 * and needs to be the same as the fib_hash output to avoid breaking
2732 * legacy utilities.
2733 */
2734 static int fib_route_seq_show(struct seq_file *seq, void *v)
2735 {
2736 struct fib_route_iter *iter = seq->private;
2737 struct fib_table *tb = iter->main_tb;
2738 struct fib_alias *fa;
2739 struct key_vector *l = v;
2740 __be32 prefix;
2741
2742 if (v == SEQ_START_TOKEN) {
2743 seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
2744 "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
2745 "\tWindow\tIRTT");
2746 return 0;
2747 }
2748
2749 prefix = htonl(l->key);
2750
2751 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
2752 const struct fib_info *fi = fa->fa_info;
2753 __be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen);
2754 unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
2755
2756 if ((fa->fa_type == RTN_BROADCAST) ||
2757 (fa->fa_type == RTN_MULTICAST))
2758 continue;
2759
2760 if (fa->tb_id != tb->tb_id)
2761 continue;
2762
2763 seq_setwidth(seq, 127);
2764
2765 if (fi)
2766 seq_printf(seq,
2767 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
2768 "%d\t%08X\t%d\t%u\t%u",
2769 fi->fib_dev ? fi->fib_dev->name : "*",
2770 prefix,
2771 fi->fib_nh->nh_gw, flags, 0, 0,
2772 fi->fib_priority,
2773 mask,
2774 (fi->fib_advmss ?
2775 fi->fib_advmss + 40 : 0),
2776 fi->fib_window,
2777 fi->fib_rtt >> 3);
2778 else
2779 seq_printf(seq,
2780 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
2781 "%d\t%08X\t%d\t%u\t%u",
2782 prefix, 0, flags, 0, 0, 0,
2783 mask, 0, 0, 0);
2784
2785 seq_pad(seq, '\n');
2786 }
2787
2788 return 0;
2789 }
2790
2791 static const struct seq_operations fib_route_seq_ops = {
2792 .start = fib_route_seq_start,
2793 .next = fib_route_seq_next,
2794 .stop = fib_route_seq_stop,
2795 .show = fib_route_seq_show,
2796 };
2797
2798 static int fib_route_seq_open(struct inode *inode, struct file *file)
2799 {
2800 return seq_open_net(inode, file, &fib_route_seq_ops,
2801 sizeof(struct fib_route_iter));
2802 }
2803
2804 static const struct file_operations fib_route_fops = {
2805 .owner = THIS_MODULE,
2806 .open = fib_route_seq_open,
2807 .read = seq_read,
2808 .llseek = seq_lseek,
2809 .release = seq_release_net,
2810 };
2811
2812 int __net_init fib_proc_init(struct net *net)
2813 {
2814 if (!proc_create("fib_trie", S_IRUGO, net->proc_net, &fib_trie_fops))
2815 goto out1;
2816
2817 if (!proc_create("fib_triestat", S_IRUGO, net->proc_net,
2818 &fib_triestat_fops))
2819 goto out2;
2820
2821 if (!proc_create("route", S_IRUGO, net->proc_net, &fib_route_fops))
2822 goto out3;
2823
2824 return 0;
2825
2826 out3:
2827 remove_proc_entry("fib_triestat", net->proc_net);
2828 out2:
2829 remove_proc_entry("fib_trie", net->proc_net);
2830 out1:
2831 return -ENOMEM;
2832 }
2833
2834 void __net_exit fib_proc_exit(struct net *net)
2835 {
2836 remove_proc_entry("fib_trie", net->proc_net);
2837 remove_proc_entry("fib_triestat", net->proc_net);
2838 remove_proc_entry("route", net->proc_net);
2839 }
2840
2841 #endif /* CONFIG_PROC_FS */