/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>

#define NEIGH_DEBUG 1

#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while (0)
#define NEIGH_PRINTK0 NEIGH_PRINTK
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK

#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif

#define PNEIGH_HASHMASK		0xF

static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);

static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All scans/updates of the hash buckets MUST be made under this lock.
   - NOTHING clever should be done under this lock: no callbacks
     into protocol backends, no attempts to send anything to the network.
     That would result in deadlocks if the backend/driver wants to use the
     neighbour cache.
   - If an entry requires some non-trivial actions, increase
     its reference count and release the table lock.

   Neighbour entries are protected:
   - by their reference count.
   - by rwlock neigh->lock.

   The reference count prevents destruction.

   neigh->lock mainly serializes the ll address data and its validity state.
   However, the same lock is used to protect other entry fields:
   - timer
   - resolution queue

   Again, nothing clever should be done under neigh->lock;
   the most complicated procedure we allow is dev->hard_header.
   dev->hard_header is assumed to be simplistic and not to make
   callbacks into neighbour tables.

   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
   the list of neighbour tables. This list is used only in process context.
 */
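
/*
 * Editor's illustrative sketch (not part of the original file): the usage
 * pattern the rules above imply.  Take a reference while the table lock is
 * held, drop the lock, then do any non-trivial work.  The helper names
 * find_candidate() and do_heavy_work() are hypothetical.
 *
 *	read_lock_bh(&tbl->lock);
 *	n = find_candidate(tbl);	// scan buckets under tbl->lock
 *	if (n)
 *		neigh_hold(n);		// pin the entry before unlocking
 *	read_unlock_bh(&tbl->lock);
 *	if (n) {
 *		do_heavy_work(n);	// may talk to drivers/network now
 *		neigh_release(n);	// drop our reference
 *	}
 */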

static DEFINE_RWLOCK(neigh_tbl_lock);

static int neigh_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}

static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	if (neigh->parms->neigh_cleanup)
		neigh->parms->neigh_cleanup(neigh);

	__neigh_notify(neigh, RTM_DELNEIGH, 0);
	neigh_release(neigh);
}
/*
 * It is a random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to the default IPv6 settings and is not overridable,
 * because it is a really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return (base ? (net_random() % base) + (base >> 1) : 0);
}
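
/*
 * Editor's note (illustrative): net_random() % base yields a value in
 * [0, base), and adding base >> 1 shifts it into [base/2, 3*base/2).
 * For example, with base = 30 * HZ the result is uniform between 15 and
 * 45 seconds, matching the (1/2)*base...(3/2)*base interval described
 * above.
 */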


static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);
	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			 * - nobody refers to it.
			 * - it is not permanent
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state & NUD_PERMANENT)) {
				*np	= n->next;
				n->dead = 1;
				shrunk	= 1;
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}

static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}

static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}

static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}

static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	for (i = 0; i <= tbl->hash_mask; i++) {
		struct neighbour *n, **np = &tbl->hash_buckets[i];

		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation:
				   we must destroy the neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   the entry to a safe state.
				 */
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}

void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	write_unlock_bh(&tbl->lock);
}

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}

static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	entries = atomic_inc_return(&tbl->entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3)
			goto out_entries;
	}

	n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	n->parms	  = neigh_parms_clone(&tbl->parms);
	setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead		  = 1;
out:
	return n;

out_entries:
	atomic_dec(&tbl->entries);
	goto out;
}

static struct neighbour **neigh_hash_alloc(unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);
	struct neighbour **ret;

	if (size <= PAGE_SIZE) {
		ret = kzalloc(size, GFP_ATOMIC);
	} else {
		ret = (struct neighbour **)
		      __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
	}
	return ret;
}

static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
{
	unsigned long size = entries * sizeof(struct neighbour *);

	if (size <= PAGE_SIZE)
		kfree(hash);
	else
		free_pages((unsigned long)hash, get_order(size));
}

static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
{
	struct neighbour **new_hash, **old_hash;
	unsigned int i, new_hash_mask, old_entries;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	BUG_ON(!is_power_of_2(new_entries));
	new_hash = neigh_hash_alloc(new_entries);
	if (!new_hash)
		return;

	old_entries = tbl->hash_mask + 1;
	new_hash_mask = new_entries - 1;
	old_hash = tbl->hash_buckets;

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i < old_entries; i++) {
		struct neighbour *n, *next;

		for (n = old_hash[i]; n; n = next) {
			unsigned int hash_val = tbl->hash(n->primary_key, n->dev);

			hash_val &= new_hash_mask;
			next = n->next;

			n->next = new_hash[hash_val];
			new_hash[hash_val] = n;
		}
	}
	tbl->hash_buckets = new_hash;
	tbl->hash_mask = new_hash_mask;

	neigh_hash_free(old_hash, old_entries);
}
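
/*
 * Editor's note (illustrative): new_entries must be a power of two so
 * that "hash & mask" selects a bucket without a modulo.  E.g. growing
 * from 8 to 16 buckets gives new_hash_mask = 15 (0xF); an entry whose
 * recomputed hash is 0x2D lands in bucket 0x2D & 0xF = 0xD.  hash_rnd
 * is also re-randomized above, so chains are redistributed on every grow.
 */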

struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, dev);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	int key_len = tbl->key_len;
	u32 hash_val;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	read_lock_bh(&tbl->lock);
	hash_val = tbl->hash(pkey, NULL);
	for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			neigh_hold(n);
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}
	read_unlock_bh(&tbl->lock);
	return n;
}

struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (n->parms->base_reachable_time << 1);

	write_lock_bh(&tbl->lock);

	if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
		neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);

	hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
			neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	neigh_release(n);
	goto out;
}
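
/*
 * Editor's illustrative sketch (not part of the original file): callers
 * must use the ERR_PTR protocol above, never a plain NULL check.  A
 * typical resolve-or-create pattern (addr and dev are hypothetical):
 *
 *	struct neighbour *n = neigh_lookup(tbl, &addr, dev);
 *	if (!n) {
 *		n = neigh_create(tbl, &addr, dev);
 *		if (IS_ERR(n))
 *			return PTR_ERR(n);	// -ENOBUFS, -EINVAL, ...
 *	}
 *	// ... use n ...
 *	neigh_release(n);	// both paths returned a held reference
 */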

struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			goto out;
		}
	}
	read_unlock_bh(&tbl->lock);
	n = NULL;
	if (!creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

#ifdef CONFIG_NET_NS
	n->net = hold_net(net);
#endif
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		release_net(net);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
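
/*
 * Editor's note (illustrative): the xor-fold above mixes all 32 bits of
 * the key tail into the low four bits before masking with PNEIGH_HASHMASK
 * (0xF), so each of the 16 proxy buckets depends on every bit of those
 * 32 bits.  Also note that the creat path allocates with GFP_KERNEL and
 * asserts the RTNL, so proxy entries may only be created in process
 * context.
 */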


int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	int key_len = tbl->key_len;
	u32 hash_val = *(u32 *)(pkey + key_len - 4);

	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			release_net(pneigh_net(n));
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}

static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				if (tbl->pdestructor)
					tbl->pdestructor(n);
				if (n->dev)
					dev_put(n->dev);
				release_net(pneigh_net(n));
				kfree(n);
				continue;
			}
			np = &n->next;
		}
	}
	return -ENOENT;
}

static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (atomic_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
/*
 *	neighbour must already be out of the table.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		printk(KERN_WARNING
		       "Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		printk(KERN_WARNING "Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;

		write_seqlock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_sequnlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);
	neigh_parms_put(neigh->parms);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/* Neighbour state is suspicious;
   disable fast path.

   Called with the neighbour write-locked.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);

	neigh->output = neigh->ops->output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with the neighbour write-locked.
 */
static void neigh_connect(struct neighbour *neigh)
{
	struct hh_cache *hh;

	NEIGH_PRINTK2("neigh %p is connected.\n", neigh);

	neigh->output = neigh->ops->connected_output;

	for (hh = neigh->hh; hh; hh = hh->hh_next)
		hh->hh_output = neigh->ops->hh_output;
}

static void neigh_periodic_timer(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	struct neighbour *n, **np;
	unsigned long expire, now = jiffies;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock(&tbl->lock);

	/*
	 *	periodically recompute ReachableTime from random function
	 */
	if (time_after(now, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p = &tbl->parms; p; p = p->next)
			p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);
	}

	np = &tbl->hash_buckets[tbl->hash_chain_gc];
	tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);

	while ((n = *np) != NULL) {
		unsigned int state;

		write_lock(&n->lock);

		state = n->nud_state;
		if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
			write_unlock(&n->lock);
			goto next_elt;
		}

		if (time_before(n->used, n->confirmed))
			n->used = n->confirmed;

		if (atomic_read(&n->refcnt) == 1 &&
		    (state == NUD_FAILED ||
		     time_after(now, n->used + n->parms->gc_staletime))) {
			*np = n->next;
			n->dead = 1;
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
			continue;
		}
		write_unlock(&n->lock);

next_elt:
		np = &n->next;
	}

	/* Cycle through all hash buckets every base_reachable_time/2 ticks.
	 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
	 * base_reachable_time.
	 */
	expire = tbl->parms.base_reachable_time >> 1;
	expire /= (tbl->hash_mask + 1);
	if (!expire)
		expire = 1;

	if (expire > HZ)
		mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
	else
		mod_timer(&tbl->gc_timer, now + expire);

	write_unlock(&tbl->lock);
}

static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return (n->nud_state & NUD_PROBE ?
		p->ucast_probes :
		p->ucast_probes + p->app_probes + p->mcast_probes);
}
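
/*
 * Editor's note (illustrative): in NUD_PROBE only unicast probes count;
 * in NUD_INCOMPLETE the budget is the sum.  With the usual ARP defaults
 * (ucast_probes = 3, mcast_probes = 3, app_probes = 0 -- assumed here,
 * since these are per-table/per-device tunables), an unresolved entry
 * may send up to 6 probes before neigh_timer_handler() below marks it
 * NUD_FAILED.
 */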

/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now, next;
	struct neighbour *neigh = (struct neighbour *)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + neigh->parms->delay_probe_time;
		} else {
			NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->delay_probe_time)) {
			NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			next = now + neigh->parms->retrans_time;
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + neigh->parms->retrans_time;
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		neigh->updated = jiffies;
		notify = 1;
		NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
		/* This is a delicate spot: report_unreachable is a very
		   complicated routine; in particular, it can hit this same
		   neighbour entry!

		   So we try to be careful here and avoid a dead loop. --ANK
		 */
		while (neigh->nud_state == NUD_FAILED &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/2))
			next = jiffies + HZ/2;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
		/* keep skb alive even if arp_queue overflows */
		if (skb)
			skb = skb_copy(skb, GFP_ATOMIC);
		write_unlock(&neigh->lock);
		neigh->ops->solicit(neigh, skb);
		atomic_inc(&neigh->probes);
		if (skb)
			kfree_skb(skb);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh);

	neigh_release(neigh);
}

int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	unsigned long now;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;

	now = jiffies;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
			neigh->nud_state = NUD_INCOMPLETE;
			neigh->updated = jiffies;
			neigh_add_timer(neigh, now + 1);
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			if (skb)
				kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh,
				jiffies + neigh->parms->delay_probe_time);
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			if (skb_queue_len(&neigh->arp_queue) >=
			    neigh->parms->queue_len) {
				struct sk_buff *buff;
				buff = neigh->arp_queue.next;
				__skb_unlink(buff, &neigh->arp_queue);
				kfree_skb(buff);
			}
			__skb_queue_tail(&neigh->arp_queue, skb);
		}
		rc = 1;
	}
out_unlock_bh:
	write_unlock_bh(&neigh->lock);
	return rc;
}
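
/*
 * Editor's note (illustrative) on the return convention above: 0 means
 * the caller may transmit immediately (the entry is connected, or stale
 * and now being re-verified); 1 means the skb was queued on arp_queue
 * pending resolution (or dropped on immediate failure).  This is what
 * lets neigh_resolve_output() below fall through to dev_hard_header()
 * only when neigh_event_send() returns 0.
 */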

static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= neigh->dev->header_ops->cache_update;

	if (update) {
		for (hh = neigh->hh; hh; hh = hh->hh_next) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}


/* Generic update routine.
   -- lladdr is the new lladdr, or NULL if it is not supplied.
   -- new is the new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
		if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
		lladdr instead of overriding it
		if it is different.
		It also allows retaining the current state
		if the lladdr is unchanged.
	NEIGH_UPDATE_F_ADMIN means that the change is administrative.

	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
		NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
		as a router.

   Caller MUST hold a reference count on the entry.
 */

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;
	/* If the entry was valid and the address has not changed,
	   do not change the entry state if the new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
			     (old & NUD_CONNECTED)))
				new = old;
		}
	}

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
	}

	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (neigh->parms->base_reachable_time << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	if (update_isrouter) {
		neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
			(neigh->flags | NTF_ROUTER) :
			(neigh->flags & ~NTF_ROUTER);
	}
	write_unlock_bh(&neigh->lock);

	if (notify)
		neigh_update_notify(neigh);

	return err;
}
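
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * protocol-side confirmation when a reply proving reachability arrives.
 * The caller holds a reference, as the comment above requires; addr, dev
 * and ll_addr are hypothetical.
 *
 *	struct neighbour *n = neigh_lookup(tbl, &addr, dev);
 *	if (n) {
 *		neigh_update(n, ll_addr, NUD_REACHABLE,
 *			     NEIGH_UPDATE_F_OVERRIDE);
 *		neigh_release(n);
 *	}
 */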

struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE);
	return neigh;
}

static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
			  __be16 protocol)
{
	struct hh_cache *hh;
	struct net_device *dev = dst->dev;

	for (hh = n->hh; hh; hh = hh->hh_next)
		if (hh->hh_type == protocol)
			break;

	if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
		seqlock_init(&hh->hh_lock);
		hh->hh_type = protocol;
		atomic_set(&hh->hh_refcnt, 0);
		hh->hh_next = NULL;

		if (dev->header_ops->cache(n, hh)) {
			kfree(hh);
			hh = NULL;
		} else {
			atomic_inc(&hh->hh_refcnt);
			hh->hh_next = n->hh;
			n->hh	    = hh;
			if (n->nud_state & NUD_CONNECTED)
				hh->hh_output = n->ops->hh_output;
			else
				hh->hh_output = n->ops->output;
		}
	}
	if (hh) {
		atomic_inc(&hh->hh_refcnt);
		dst->hh = hh;
	}
}
/* This function can be used in contexts where only the old dev_queue_xmit
   worked, e.g. if you want to override the normal output path (eql, shaper),
   but resolution has not been made yet.
 */

int neigh_compat_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	__skb_pull(skb, skb_network_offset(skb));

	if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
			    skb->len) < 0 &&
	    dev->header_ops->rebuild(skb))
		return 0;

	return dev_queue_xmit(skb);
}

/* Slow and careful. */

int neigh_resolve_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh;
	int rc = 0;

	if (!dst || !(neigh = dst->neighbour))
		goto discard;

	__skb_pull(skb, skb_network_offset(skb));

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		if (dev->header_ops->cache && !dst->hh) {
			write_lock_bh(&neigh->lock);
			if (!dst->hh)
				neigh_hh_init(neigh, dst, dst->ops->protocol);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			write_unlock_bh(&neigh->lock);
		} else {
			read_lock_bh(&neigh->lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
			read_unlock_bh(&neigh->lock);
		}
		if (err >= 0)
			rc = neigh->ops->queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
discard:
	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
		      dst, dst ? dst->neighbour : NULL);
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}

/* As fast as possible without hh cache */

int neigh_connected_output(struct sk_buff *skb)
{
	int err;
	struct dst_entry *dst = skb->dst;
	struct neighbour *neigh = dst->neighbour;
	struct net_device *dev = neigh->dev;

	__skb_pull(skb, skb_network_offset(skb));

	read_lock_bh(&neigh->lock);
	err = dev_hard_header(skb, dev, ntohs(skb->protocol),
			      neigh->ha, NULL, skb->len);
	read_unlock_bh(&neigh->lock);
	if (err >= 0)
		err = neigh->ops->queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}

static void neigh_proxy_process(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table *)arg;
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb;

	spin_lock(&tbl->proxy_queue.lock);

	skb = tbl->proxy_queue.next;

	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
		struct sk_buff *back = skb;
		long tdif = NEIGH_CB(back)->sched_next - now;

		skb = skb->next;
		if (tdif <= 0) {
			struct net_device *dev = back->dev;
			__skb_unlink(back, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev))
				tbl->proxy_redo(back);
			else
				kfree_skb(back);

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}

void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;
	unsigned long sched_next = now + (net_random() % p->proxy_delay);

	if (tbl->proxy_queue.qlen > p->proxy_qlen) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	dst_release(skb->dst);
	skb->dst = NULL;
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
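
/*
 * Editor's note (illustrative): each proxied request is delayed by a
 * random amount in [0, proxy_delay) jiffies before proxy_redo() answers
 * it, so a proxy ARP box does not reply faster than the real owner of
 * the address.  E.g. with proxy_delay = 80 jiffies at HZ=100 (0.8 s is
 * the usual ARP default, assumed here) the reply is deferred by up to
 * 0.8 seconds.
 */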

static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
						      struct net *net, int ifindex)
{
	struct neigh_parms *p;

	for (p = &tbl->parms; p; p = p->next) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex))
			return p;
	}

	return NULL;
}

struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p, *ref;
	struct net *net;

	net = dev_net(dev);
	ref = lookup_neigh_params(tbl, net, 0);
	if (!ref)
		return NULL;

	p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		atomic_set(&p->refcnt, 1);
		INIT_RCU_HEAD(&p->rcu_head);
		p->reachable_time =
				neigh_rand_reach_time(p->base_reachable_time);

		if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
			kfree(p);
			return NULL;
		}

		dev_hold(dev);
		p->dev = dev;
#ifdef CONFIG_NET_NS
		p->net = hold_net(net);
#endif
		p->sysctl_table = NULL;
		write_lock_bh(&tbl->lock);
		p->next		= tbl->parms.next;
		tbl->parms.next = p;
		write_unlock_bh(&tbl->lock);
	}
	return p;
}

static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}

void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	struct neigh_parms **p;

	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	for (p = &tbl->parms.next; *p; p = &(*p)->next) {
		if (*p == parms) {
			*p = parms->next;
			parms->dead = 1;
			write_unlock_bh(&tbl->lock);
			if (parms->dev)
				dev_put(parms->dev);
			call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
			return;
		}
	}
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK1("neigh_parms_release: not found\n");
}

static void neigh_parms_destroy(struct neigh_parms *parms)
{
	release_net(neigh_parms_net(parms));
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

#ifdef CONFIG_NET_NS
	tbl->parms.net = &init_net;
#endif
	atomic_set(&tbl->parms.refcnt, 1);
	INIT_RCU_HEAD(&tbl->parms.rcu_head);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(tbl->parms.base_reachable_time);

	if (!tbl->kmem_cachep)
		tbl->kmem_cachep =
			kmem_cache_create(tbl->id, tbl->entry_size, 0,
					  SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					  NULL);
	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	tbl->pde = proc_create(tbl->id, 0, init_net.proc_net_stat,
			       &neigh_stat_seq_fops);
	if (!tbl->pde)
		panic("cannot create neighbour proc dir entry");
	tbl->pde->data = tbl;
#endif

	tbl->hash_mask = 1;
	tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->hash_buckets || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	rwlock_init(&tbl->lock);
	setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
	tbl->gc_timer.expires  = now + 1;
	add_timer(&tbl->gc_timer);

	setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
	skb_queue_head_init_class(&tbl->proxy_queue,
				  &neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;
}

void neigh_table_init(struct neigh_table *tbl)
{
	struct neigh_table *tmp;

	neigh_table_init_no_netlink(tbl);
	write_lock(&neigh_tbl_lock);
	for (tmp = neigh_tables; tmp; tmp = tmp->next) {
		if (tmp->family == tbl->family)
			break;
	}
	tbl->next	= neigh_tables;
	neigh_tables	= tbl;
	write_unlock(&neigh_tbl_lock);

	if (unlikely(tmp)) {
		printk(KERN_ERR "NEIGH: Registering multiple tables for "
		       "family %d\n", tbl->family);
		dump_stack();
	}
}

int neigh_table_clear(struct neigh_table *tbl)
{
	struct neigh_table **tp;

	/* It is not clean... Fix it to unload IPv6 module safely */
	del_timer_sync(&tbl->gc_timer);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		printk(KERN_CRIT "neighbour leakage\n");
	write_lock(&neigh_tbl_lock);
	for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
		if (*tp == tbl) {
			*tp = tbl->next;
			break;
		}
	}
	write_unlock(&neigh_tbl_lock);

	neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
	tbl->hash_buckets = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	kmem_cache_destroy(tbl->kmem_cachep);
	tbl->kmem_cachep = NULL;

	return 0;
}

static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (dst_attr == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		struct neighbour *neigh;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(dst_attr) < tbl->key_len)
			goto out_dev_put;

		if (ndm->ndm_flags & NTF_PROXY) {
			err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
		if (neigh == NULL) {
			err = -ENOENT;
			goto out_dev_put;
		}

		err = neigh_update(neigh, NULL, NUD_FAILED,
				   NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_ADMIN);
		neigh_release(neigh);
		goto out_dev_put;
	}
	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	int err;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (tb[NDA_DST] == NULL)
		goto out;

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
			goto out_dev_put;
	}

	read_lock(&neigh_tbl_lock);
	for (tbl = neigh_tables; tbl; tbl = tbl->next) {
		int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
		struct neighbour *neigh;
		void *dst, *lladdr;

		if (tbl->family != ndm->ndm_family)
			continue;
		read_unlock(&neigh_tbl_lock);

		if (nla_len(tb[NDA_DST]) < tbl->key_len)
			goto out_dev_put;
		dst = nla_data(tb[NDA_DST]);
		lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

		if (ndm->ndm_flags & NTF_PROXY) {
			struct pneigh_entry *pn;

			err = -ENOBUFS;
			pn = pneigh_lookup(tbl, net, dst, dev, 1);
			if (pn) {
				pn->flags = ndm->ndm_flags;
				err = 0;
			}
			goto out_dev_put;
		}

		if (dev == NULL)
			goto out_dev_put;

		neigh = neigh_lookup(tbl, dst, dev);
		if (neigh == NULL) {
			if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
				err = -ENOENT;
				goto out_dev_put;
			}

			neigh = __neigh_lookup_errno(tbl, dst, dev);
			if (IS_ERR(neigh)) {
				err = PTR_ERR(neigh);
				goto out_dev_put;
			}
		} else {
			if (nlh->nlmsg_flags & NLM_F_EXCL) {
				err = -EEXIST;
				neigh_release(neigh);
				goto out_dev_put;
			}

			if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
				flags &= ~NEIGH_UPDATE_F_OVERRIDE;
		}

		err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
		neigh_release(neigh);
		goto out_dev_put;
	}

	read_unlock(&neigh_tbl_lock);
	err = -EAFNOSUPPORT;

out_dev_put:
	if (dev)
		dev_put(dev);
out:
	return err;
}

static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if (parms->dev)
		NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);

	NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
	NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
	NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
	NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
	NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
	NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
	NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
		      parms->base_reachable_time);
	NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
	NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
	NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
	NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
	NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
	NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);

	return nla_nest_end(skb, nest);

nla_put_failure:
	return nla_nest_cancel(skb, nest);
}

static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
	NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
	NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
	NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
	NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);

	{
		unsigned long now = jiffies;
		unsigned int flush_delta = now - tbl->last_flush;
		unsigned int rand_delta = now - tbl->last_rand;

		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_hash_rnd		= tbl->hash_rnd,
			.ndtc_hash_mask		= tbl->hash_mask,
			.ndtc_hash_chain_gc	= tbl->hash_chain_gc,
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
		}

		NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	return nlmsg_end(skb, nlh);
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
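
/*
 * Editor's note (illustrative): per the policies above, an
 * RTM_SETNEIGHTBL request selects a table by NDTA_NAME and carries any
 * parameter changes inside a nested NDTA_PARMS attribute, e.g.
 *
 *	struct ndtmsg
 *	NDTA_NAME   "arp_cache"
 *	NDTA_PARMS  { NDTPA_IFINDEX <ifindex>, NDTPA_QUEUE_LEN <n> }
 *
 * neightbl_set() below parses exactly this shape; NDTPA_IFINDEX picks
 * which neigh_parms instance (default or per-device) is modified.
 */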
1818
1819 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1820 {
1821 struct net *net = sock_net(skb->sk);
1822 struct neigh_table *tbl;
1823 struct ndtmsg *ndtmsg;
1824 struct nlattr *tb[NDTA_MAX+1];
1825 int err;
1826
1827 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1828 nl_neightbl_policy);
1829 if (err < 0)
1830 goto errout;
1831
1832 if (tb[NDTA_NAME] == NULL) {
1833 err = -EINVAL;
1834 goto errout;
1835 }
1836
1837 ndtmsg = nlmsg_data(nlh);
1838 read_lock(&neigh_tbl_lock);
1839 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1840 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1841 continue;
1842
1843 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1844 break;
1845 }
1846
1847 if (tbl == NULL) {
1848 err = -ENOENT;
1849 goto errout_locked;
1850 }
1851
1852 /*
1853 * We acquire tbl->lock to be nice to the periodic timers and
1854 * make sure they always see a consistent set of values.
1855 */
1856 write_lock_bh(&tbl->lock);
1857
1858 if (tb[NDTA_PARMS]) {
1859 struct nlattr *tbp[NDTPA_MAX+1];
1860 struct neigh_parms *p;
1861 int i, ifindex = 0;
1862
1863 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1864 nl_ntbl_parm_policy);
1865 if (err < 0)
1866 goto errout_tbl_lock;
1867
1868 if (tbp[NDTPA_IFINDEX])
1869 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1870
1871 p = lookup_neigh_params(tbl, net, ifindex);
1872 if (p == NULL) {
1873 err = -ENOENT;
1874 goto errout_tbl_lock;
1875 }
1876
1877 for (i = 1; i <= NDTPA_MAX; i++) {
1878 if (tbp[i] == NULL)
1879 continue;
1880
1881 switch (i) {
1882 case NDTPA_QUEUE_LEN:
1883 p->queue_len = nla_get_u32(tbp[i]);
1884 break;
1885 case NDTPA_PROXY_QLEN:
1886 p->proxy_qlen = nla_get_u32(tbp[i]);
1887 break;
1888 case NDTPA_APP_PROBES:
1889 p->app_probes = nla_get_u32(tbp[i]);
1890 break;
1891 case NDTPA_UCAST_PROBES:
1892 p->ucast_probes = nla_get_u32(tbp[i]);
1893 break;
1894 case NDTPA_MCAST_PROBES:
1895 p->mcast_probes = nla_get_u32(tbp[i]);
1896 break;
1897 case NDTPA_BASE_REACHABLE_TIME:
1898 p->base_reachable_time = nla_get_msecs(tbp[i]);
1899 break;
1900 case NDTPA_GC_STALETIME:
1901 p->gc_staletime = nla_get_msecs(tbp[i]);
1902 break;
1903 case NDTPA_DELAY_PROBE_TIME:
1904 p->delay_probe_time = nla_get_msecs(tbp[i]);
1905 break;
1906 case NDTPA_RETRANS_TIME:
1907 p->retrans_time = nla_get_msecs(tbp[i]);
1908 break;
1909 case NDTPA_ANYCAST_DELAY:
1910 p->anycast_delay = nla_get_msecs(tbp[i]);
1911 break;
1912 case NDTPA_PROXY_DELAY:
1913 p->proxy_delay = nla_get_msecs(tbp[i]);
1914 break;
1915 case NDTPA_LOCKTIME:
1916 p->locktime = nla_get_msecs(tbp[i]);
1917 break;
1918 }
1919 }
1920 }
1921
1922 if (tb[NDTA_THRESH1])
1923 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1924
1925 if (tb[NDTA_THRESH2])
1926 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1927
1928 if (tb[NDTA_THRESH3])
1929 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1930
1931 if (tb[NDTA_GC_INTERVAL])
1932 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1933
1934 err = 0;
1935
1936 errout_tbl_lock:
1937 write_unlock_bh(&tbl->lock);
1938 errout_locked:
1939 read_unlock(&neigh_tbl_lock);
1940 errout:
1941 return err;
1942 }
1943
1944 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1945 {
1946 struct net *net = sock_net(skb->sk);
1947 int family, tidx, nidx = 0;
1948 int tbl_skip = cb->args[0];
1949 int neigh_skip = cb->args[1];
1950 struct neigh_table *tbl;
1951
1952 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1953
1954 read_lock(&neigh_tbl_lock);
1955 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1956 struct neigh_parms *p;
1957
1958 if (tidx < tbl_skip || (family && tbl->family != family))
1959 continue;
1960
1961 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1962 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1963 NLM_F_MULTI) <= 0)
1964 break;
1965
1966 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
1967 if (!net_eq(neigh_parms_net(p), net))
1968 continue;
1969
1970 if (nidx++ < neigh_skip)
1971 continue;
1972
1973 if (neightbl_fill_param_info(skb, tbl, p,
1974 NETLINK_CB(cb->skb).pid,
1975 cb->nlh->nlmsg_seq,
1976 RTM_NEWNEIGHTBL,
1977 NLM_F_MULTI) <= 0)
1978 goto out;
1979 }
1980
1981 neigh_skip = 0;
1982 }
1983 out:
1984 read_unlock(&neigh_tbl_lock);
1985 cb->args[0] = tidx;
1986 cb->args[1] = nidx;
1987
1988 return skb->len;
1989 }
1990
1991 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1992 u32 pid, u32 seq, int type, unsigned int flags)
1993 {
1994 unsigned long now = jiffies;
1995 struct nda_cacheinfo ci;
1996 struct nlmsghdr *nlh;
1997 struct ndmsg *ndm;
1998
1999 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2000 if (nlh == NULL)
2001 return -EMSGSIZE;
2002
2003 ndm = nlmsg_data(nlh);
2004 ndm->ndm_family = neigh->ops->family;
2005 ndm->ndm_pad1 = 0;
2006 ndm->ndm_pad2 = 0;
2007 ndm->ndm_flags = neigh->flags;
2008 ndm->ndm_type = neigh->type;
2009 ndm->ndm_ifindex = neigh->dev->ifindex;
2010
2011 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2012
2013 read_lock_bh(&neigh->lock);
2014 ndm->ndm_state = neigh->nud_state;
2015 if ((neigh->nud_state & NUD_VALID) &&
2016 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2017 read_unlock_bh(&neigh->lock);
2018 goto nla_put_failure;
2019 }
2020
2021 ci.ndm_used = now - neigh->used;
2022 ci.ndm_confirmed = now - neigh->confirmed;
2023 ci.ndm_updated = now - neigh->updated;
2024 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2025 read_unlock_bh(&neigh->lock);
2026
2027 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2028 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2029
2030 return nlmsg_end(skb, nlh);
2031
2032 nla_put_failure:
2033 nlmsg_cancel(skb, nlh);
2034 return -EMSGSIZE;
2035 }
2036
2037 static void neigh_update_notify(struct neighbour *neigh)
2038 {
2039 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2040 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2041 }
2042
2043 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2044 struct netlink_callback *cb)
2045 {
2046 struct net * net = sock_net(skb->sk);
2047 struct neighbour *n;
2048 int rc, h, s_h = cb->args[1];
2049 int idx, s_idx = idx = cb->args[2];
2050
2051 read_lock_bh(&tbl->lock);
2052 for (h = 0; h <= tbl->hash_mask; h++) {
2053 if (h < s_h)
2054 continue;
2055 if (h > s_h)
2056 s_idx = 0;
2057 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2058 int lidx;
2059 if (dev_net(n->dev) != net)
2060 continue;
2061 lidx = idx++;
2062 if (lidx < s_idx)
2063 continue;
2064 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2065 cb->nlh->nlmsg_seq,
2066 RTM_NEWNEIGH,
2067 NLM_F_MULTI) <= 0) {
2068 read_unlock_bh(&tbl->lock);
2069 rc = -1;
2070 goto out;
2071 }
2072 }
2073 }
2074 read_unlock_bh(&tbl->lock);
2075 rc = skb->len;
2076 out:
2077 cb->args[1] = h;
2078 cb->args[2] = idx;
2079 return rc;
2080 }
2081
2082 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2083 {
2084 struct neigh_table *tbl;
2085 int t, family, s_t;
2086
2087 read_lock(&neigh_tbl_lock);
2088 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2089 s_t = cb->args[0];
2090
2091 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2092 if (t < s_t || (family && tbl->family != family))
2093 continue;
2094 if (t > s_t)
2095 memset(&cb->args[1], 0, sizeof(cb->args) -
2096 sizeof(cb->args[0]));
2097 if (neigh_dump_table(tbl, skb, cb) < 0)
2098 break;
2099 }
2100 read_unlock(&neigh_tbl_lock);
2101
2102 cb->args[0] = t;
2103 return skb->len;
2104 }
2105
2106 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2107 {
2108 int chain;
2109
2110 read_lock_bh(&tbl->lock);
2111 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2112 struct neighbour *n;
2113
2114 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2115 cb(n, cookie);
2116 }
2117 read_unlock_bh(&tbl->lock);
2118 }
2119 EXPORT_SYMBOL(neigh_for_each);
2120
2121 /* The tbl->lock must be held as a writer and BH disabled. */
2122 void __neigh_for_each_release(struct neigh_table *tbl,
2123 int (*cb)(struct neighbour *))
2124 {
2125 int chain;
2126
2127 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2128 struct neighbour *n, **np;
2129
2130 np = &tbl->hash_buckets[chain];
2131 while ((n = *np) != NULL) {
2132 int release;
2133
2134 write_lock(&n->lock);
2135 release = cb(n);
2136 if (release) {
2137 *np = n->next;
2138 n->dead = 1;
2139 } else
2140 np = &n->next;
2141 write_unlock(&n->lock);
2142 if (release)
2143 neigh_cleanup_and_release(n);
2144 }
2145 }
2146 }
2147 EXPORT_SYMBOL(__neigh_for_each_release);
2148
2149 #ifdef CONFIG_PROC_FS
2150
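/*
 * /proc iteration helpers.  neigh_get_first()/neigh_get_next() walk
 * the hash buckets of one table, skipping entries owned by other
 * network namespaces and, with NEIGH_SEQ_SKIP_NOARP, entries in
 * NUD_NOARP state; the pneigh_* variants do the same walk over the
 * proxy hash table.  state->bucket and state->flags record the
 * current position so an interrupted walk can resume.
 */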
2151 static struct neighbour *neigh_get_first(struct seq_file *seq)
2152 {
2153 struct neigh_seq_state *state = seq->private;
2154 struct net *net = seq_file_net(seq);
2155 struct neigh_table *tbl = state->tbl;
2156 struct neighbour *n = NULL;
2157 int bucket;
2158
2159 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2160 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2161 n = tbl->hash_buckets[bucket];
2162
2163 while (n) {
2164 if (!net_eq(dev_net(n->dev), net))
2165 goto next;
2166 if (state->neigh_sub_iter) {
2167 loff_t fakep = 0;
2168 void *v;
2169
2170 v = state->neigh_sub_iter(state, n, &fakep);
2171 if (!v)
2172 goto next;
2173 }
2174 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2175 break;
2176 if (n->nud_state & ~NUD_NOARP)
2177 break;
2178 next:
2179 n = n->next;
2180 }
2181
2182 if (n)
2183 break;
2184 }
2185 state->bucket = bucket;
2186
2187 return n;
2188 }
2189
2190 static struct neighbour *neigh_get_next(struct seq_file *seq,
2191 struct neighbour *n,
2192 loff_t *pos)
2193 {
2194 struct neigh_seq_state *state = seq->private;
2195 struct net *net = seq_file_net(seq);
2196 struct neigh_table *tbl = state->tbl;
2197
2198 if (state->neigh_sub_iter) {
2199 void *v = state->neigh_sub_iter(state, n, pos);
2200 if (v)
2201 return n;
2202 }
2203 n = n->next;
2204
2205 while (1) {
2206 while (n) {
2207 if (!net_eq(dev_net(n->dev), net))
2208 goto next;
2209 if (state->neigh_sub_iter) {
2210 void *v = state->neigh_sub_iter(state, n, pos);
2211 if (v)
2212 return n;
2213 goto next;
2214 }
2215 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2216 break;
2217
2218 if (n->nud_state & ~NUD_NOARP)
2219 break;
2220 next:
2221 n = n->next;
2222 }
2223
2224 if (n)
2225 break;
2226
2227 if (++state->bucket > tbl->hash_mask)
2228 break;
2229
2230 n = tbl->hash_buckets[state->bucket];
2231 }
2232
2233 if (n && pos)
2234 --(*pos);
2235 return n;
2236 }
2237
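/*
 * Step from the first entry to the *pos'th one.  neigh_get_next()
 * decrements *pos once per step, so a nonzero *pos on return means
 * the requested position lies beyond the end of the table.
 */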
2238 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2239 {
2240 struct neighbour *n = neigh_get_first(seq);
2241
2242 if (n) {
2243 while (*pos) {
2244 n = neigh_get_next(seq, n, pos);
2245 if (!n)
2246 break;
2247 }
2248 }
2249 return *pos ? NULL : n;
2250 }
2251
2252 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2253 {
2254 struct neigh_seq_state *state = seq->private;
2255 struct net *net = seq_file_net(seq);
2256 struct neigh_table *tbl = state->tbl;
2257 struct pneigh_entry *pn = NULL;
2258 int bucket;
2259
2260 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2261 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2262 pn = tbl->phash_buckets[bucket];
2263 while (pn && !net_eq(pneigh_net(pn), net))
2264 pn = pn->next;
2265 if (pn)
2266 break;
2267 }
2268 state->bucket = bucket;
2269
2270 return pn;
2271 }
2272
2273 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2274 struct pneigh_entry *pn,
2275 loff_t *pos)
2276 {
2277 struct neigh_seq_state *state = seq->private;
2278 struct net *net = seq_file_net(seq);
2279 struct neigh_table *tbl = state->tbl;
2280
2281 pn = pn->next;
2282 while (!pn) {
2283 if (++state->bucket > PNEIGH_HASHMASK)
2284 break;
2285 pn = tbl->phash_buckets[state->bucket];
2286 while (pn && !net_eq(pneigh_net(pn), net))
2287 pn = pn->next;
2288 if (pn)
2289 break;
2290 }
2291
2292 if (pn && pos)
2293 --(*pos);
2294
2295 return pn;
2296 }
2297
2298 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2299 {
2300 struct pneigh_entry *pn = pneigh_get_first(seq);
2301
2302 if (pn) {
2303 while (*pos) {
2304 pn = pneigh_get_next(seq, pn, pos);
2305 if (!pn)
2306 break;
2307 }
2308 }
2309 return *pos ? NULL : pn;
2310 }
2311
2312 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2313 {
2314 struct neigh_seq_state *state = seq->private;
2315 void *rc;
2316
2317 rc = neigh_get_idx(seq, pos);
2318 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2319 rc = pneigh_get_idx(seq, pos);
2320
2321 return rc;
2322 }
2323
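/*
 * Entry points for protocol /proc files.  A protocol embeds these
 * helpers in its own seq_operations; the read lock taken here is
 * dropped again in neigh_seq_stop().  Illustrative wiring (a sketch
 * only: "my_seq_start" is a made-up name, while arp_tbl and
 * NEIGH_SEQ_SKIP_NOARP are real):
 *
 *	static void *my_seq_start(struct seq_file *seq, loff_t *pos)
 *	{
 *		return neigh_seq_start(seq, pos, &arp_tbl,
 *				       NEIGH_SEQ_SKIP_NOARP);
 *	}
 */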
2324 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2325 __acquires(tbl->lock)
2326 {
2327 struct neigh_seq_state *state = seq->private;
2328 loff_t pos_minus_one;
2329
2330 state->tbl = tbl;
2331 state->bucket = 0;
2332 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2333
2334 read_lock_bh(&tbl->lock);
2335
2336 pos_minus_one = *pos - 1;
2337 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2338 }
2339 EXPORT_SYMBOL(neigh_seq_start);
2340
2341 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2342 {
2343 struct neigh_seq_state *state;
2344 void *rc;
2345
2346 if (v == SEQ_START_TOKEN) {
2347 rc = neigh_get_idx(seq, pos);
2348 goto out;
2349 }
2350
2351 state = seq->private;
2352 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2353 rc = neigh_get_next(seq, v, NULL);
2354 if (rc)
2355 goto out;
2356 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2357 rc = pneigh_get_first(seq);
2358 } else {
2359 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2360 rc = pneigh_get_next(seq, v, NULL);
2361 }
2362 out:
2363 ++(*pos);
2364 return rc;
2365 }
2366 EXPORT_SYMBOL(neigh_seq_next);
2367
2368 void neigh_seq_stop(struct seq_file *seq, void *v)
2369 __releases(tbl->lock)
2370 {
2371 struct neigh_seq_state *state = seq->private;
2372 struct neigh_table *tbl = state->tbl;
2373
2374 read_unlock_bh(&tbl->lock);
2375 }
2376 EXPORT_SYMBOL(neigh_seq_stop);
2377
2378 /* statistics via seq_file */
2379
2380 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2381 {
2382 struct proc_dir_entry *pde = seq->private;
2383 struct neigh_table *tbl = pde->data;
2384 int cpu;
2385
2386 if (*pos == 0)
2387 return SEQ_START_TOKEN;
2388
2389 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2390 if (!cpu_possible(cpu))
2391 continue;
2392 *pos = cpu+1;
2393 return per_cpu_ptr(tbl->stats, cpu);
2394 }
2395 return NULL;
2396 }
2397
2398 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2399 {
2400 struct proc_dir_entry *pde = seq->private;
2401 struct neigh_table *tbl = pde->data;
2402 int cpu;
2403
2404 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2405 if (!cpu_possible(cpu))
2406 continue;
2407 *pos = cpu+1;
2408 return per_cpu_ptr(tbl->stats, cpu);
2409 }
2410 return NULL;
2411 }
2412
2413 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2414 {
2415
2416 }
2417
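/*
 * Emit one header line for SEQ_START_TOKEN, then one line of hex
 * counters per possible CPU; the column order matches the header.
 */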
2418 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2419 {
2420 struct proc_dir_entry *pde = seq->private;
2421 struct neigh_table *tbl = pde->data;
2422 struct neigh_statistics *st = v;
2423
2424 if (v == SEQ_START_TOKEN) {
2425 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2426 return 0;
2427 }
2428
2429 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2430 "%08lx %08lx %08lx %08lx\n",
2431 atomic_read(&tbl->entries),
2432
2433 st->allocs,
2434 st->destroys,
2435 st->hash_grows,
2436
2437 st->lookups,
2438 st->hits,
2439
2440 st->res_failed,
2441
2442 st->rcv_probes_mcast,
2443 st->rcv_probes_ucast,
2444
2445 st->periodic_gc_runs,
2446 st->forced_gc_runs
2447 );
2448
2449 return 0;
2450 }
2451
2452 static const struct seq_operations neigh_stat_seq_ops = {
2453 .start = neigh_stat_seq_start,
2454 .next = neigh_stat_seq_next,
2455 .stop = neigh_stat_seq_stop,
2456 .show = neigh_stat_seq_show,
2457 };
2458
2459 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2460 {
2461 int ret = seq_open(file, &neigh_stat_seq_ops);
2462
2463 if (!ret) {
2464 struct seq_file *sf = file->private_data;
2465 sf->private = PDE(inode);
2466 }
2467 return ret;
2468 }
2469
2470 static const struct file_operations neigh_stat_seq_fops = {
2471 .owner = THIS_MODULE,
2472 .open = neigh_stat_seq_open,
2473 .read = seq_read,
2474 .llseek = seq_lseek,
2475 .release = seq_release,
2476 };
2477
2478 #endif /* CONFIG_PROC_FS */
2479
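/*
 * Worst-case size of a neighbour notification: both addresses are
 * budgeted at MAX_ADDR_LEN, so the allocation in __neigh_notify()
 * can never be too small for neigh_fill_info().
 */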
2480 static inline size_t neigh_nlmsg_size(void)
2481 {
2482 return NLMSG_ALIGN(sizeof(struct ndmsg))
2483 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2484 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2485 + nla_total_size(sizeof(struct nda_cacheinfo))
2486 + nla_total_size(4); /* NDA_PROBES */
2487 }
2488
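/*
 * Broadcast a netlink event for @n to the RTNLGRP_NEIGH group.  This
 * runs in contexts that may not sleep, hence GFP_ATOMIC; failures are
 * recorded with rtnl_set_sk_err() so listeners can detect the lost
 * event instead of silently missing it.
 */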
2489 static void __neigh_notify(struct neighbour *n, int type, int flags)
2490 {
2491 struct net *net = dev_net(n->dev);
2492 struct sk_buff *skb;
2493 int err = -ENOBUFS;
2494
2495 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2496 if (skb == NULL)
2497 goto errout;
2498
2499 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2500 if (err < 0) {
2501 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2502 WARN_ON(err == -EMSGSIZE);
2503 kfree_skb(skb);
2504 goto errout;
2505 }
2506 err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2507 errout:
2508 if (err < 0)
2509 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2510 }
2511
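/*
 * With CONFIG_ARPD, resolution can be handed to a user space daemon:
 * this sends an RTM_GETNEIGH request to the RTNLGRP_NEIGH group, and
 * the daemon is expected to answer with an RTM_NEWNEIGH update.
 */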
2512 #ifdef CONFIG_ARPD
2513 void neigh_app_ns(struct neighbour *n)
2514 {
2515 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2516 }
2517 #endif /* CONFIG_ARPD */
2518
2519 #ifdef CONFIG_SYSCTL
2520
2521 static struct neigh_sysctl_table {
2522 struct ctl_table_header *sysctl_header;
2523 struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2524 char *dev_name;
2525 } neigh_sysctl_template __read_mostly = {
2526 .neigh_vars = {
2527 {
2528 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2529 .procname = "mcast_solicit",
2530 .maxlen = sizeof(int),
2531 .mode = 0644,
2532 .proc_handler = &proc_dointvec,
2533 },
2534 {
2535 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2536 .procname = "ucast_solicit",
2537 .maxlen = sizeof(int),
2538 .mode = 0644,
2539 .proc_handler = &proc_dointvec,
2540 },
2541 {
2542 .ctl_name = NET_NEIGH_APP_SOLICIT,
2543 .procname = "app_solicit",
2544 .maxlen = sizeof(int),
2545 .mode = 0644,
2546 .proc_handler = &proc_dointvec,
2547 },
2548 {
2549 .procname = "retrans_time",
2550 .maxlen = sizeof(int),
2551 .mode = 0644,
2552 .proc_handler = &proc_dointvec_userhz_jiffies,
2553 },
2554 {
2555 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2556 .procname = "base_reachable_time",
2557 .maxlen = sizeof(int),
2558 .mode = 0644,
2559 .proc_handler = &proc_dointvec_jiffies,
2560 .strategy = &sysctl_jiffies,
2561 },
2562 {
2563 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2564 .procname = "delay_first_probe_time",
2565 .maxlen = sizeof(int),
2566 .mode = 0644,
2567 .proc_handler = &proc_dointvec_jiffies,
2568 .strategy = &sysctl_jiffies,
2569 },
2570 {
2571 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2572 .procname = "gc_stale_time",
2573 .maxlen = sizeof(int),
2574 .mode = 0644,
2575 .proc_handler = &proc_dointvec_jiffies,
2576 .strategy = &sysctl_jiffies,
2577 },
2578 {
2579 .ctl_name = NET_NEIGH_UNRES_QLEN,
2580 .procname = "unres_qlen",
2581 .maxlen = sizeof(int),
2582 .mode = 0644,
2583 .proc_handler = &proc_dointvec,
2584 },
2585 {
2586 .ctl_name = NET_NEIGH_PROXY_QLEN,
2587 .procname = "proxy_qlen",
2588 .maxlen = sizeof(int),
2589 .mode = 0644,
2590 .proc_handler = &proc_dointvec,
2591 },
2592 {
2593 .procname = "anycast_delay",
2594 .maxlen = sizeof(int),
2595 .mode = 0644,
2596 .proc_handler = &proc_dointvec_userhz_jiffies,
2597 },
2598 {
2599 .procname = "proxy_delay",
2600 .maxlen = sizeof(int),
2601 .mode = 0644,
2602 .proc_handler = &proc_dointvec_userhz_jiffies,
2603 },
2604 {
2605 .procname = "locktime",
2606 .maxlen = sizeof(int),
2607 .mode = 0644,
2608 .proc_handler = &proc_dointvec_userhz_jiffies,
2609 },
2610 {
2611 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2612 .procname = "retrans_time_ms",
2613 .maxlen = sizeof(int),
2614 .mode = 0644,
2615 .proc_handler = &proc_dointvec_ms_jiffies,
2616 .strategy = &sysctl_ms_jiffies,
2617 },
2618 {
2619 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2620 .procname = "base_reachable_time_ms",
2621 .maxlen = sizeof(int),
2622 .mode = 0644,
2623 .proc_handler = &proc_dointvec_ms_jiffies,
2624 .strategy = &sysctl_ms_jiffies,
2625 },
2626 {
2627 .ctl_name = NET_NEIGH_GC_INTERVAL,
2628 .procname = "gc_interval",
2629 .maxlen = sizeof(int),
2630 .mode = 0644,
2631 .proc_handler = &proc_dointvec_jiffies,
2632 .strategy = &sysctl_jiffies,
2633 },
2634 {
2635 .ctl_name = NET_NEIGH_GC_THRESH1,
2636 .procname = "gc_thresh1",
2637 .maxlen = sizeof(int),
2638 .mode = 0644,
2639 .proc_handler = &proc_dointvec,
2640 },
2641 {
2642 .ctl_name = NET_NEIGH_GC_THRESH2,
2643 .procname = "gc_thresh2",
2644 .maxlen = sizeof(int),
2645 .mode = 0644,
2646 .proc_handler = &proc_dointvec,
2647 },
2648 {
2649 .ctl_name = NET_NEIGH_GC_THRESH3,
2650 .procname = "gc_thresh3",
2651 .maxlen = sizeof(int),
2652 .mode = 0644,
2653 .proc_handler = &proc_dointvec,
2654 },
2655 {},
2656 },
2657 };
2658
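/*
 * Clone the template above and point each ctl_table entry at the
 * matching field of @p; the numeric indices used below must stay in
 * step with the order of neigh_vars[].  The result appears under
 * /proc/sys/net/<p_name>/neigh/<dev>/, e.g. for IPv4 and a device
 * named eth0: /proc/sys/net/ipv4/neigh/eth0/retrans_time.
 */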
2659 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2660 int p_id, int pdev_id, char *p_name,
2661 proc_handler *handler, ctl_handler *strategy)
2662 {
2663 struct neigh_sysctl_table *t;
2664 const char *dev_name_source = NULL;
2665
2666 #define NEIGH_CTL_PATH_ROOT 0
2667 #define NEIGH_CTL_PATH_PROTO 1
2668 #define NEIGH_CTL_PATH_NEIGH 2
2669 #define NEIGH_CTL_PATH_DEV 3
2670
2671 struct ctl_path neigh_path[] = {
2672 { .procname = "net", .ctl_name = CTL_NET, },
2673 { .procname = "proto", .ctl_name = 0, },
2674 { .procname = "neigh", .ctl_name = 0, },
2675 { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2676 { },
2677 };
2678
2679 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2680 if (!t)
2681 goto err;
2682
2683 t->neigh_vars[0].data = &p->mcast_probes;
2684 t->neigh_vars[1].data = &p->ucast_probes;
2685 t->neigh_vars[2].data = &p->app_probes;
2686 t->neigh_vars[3].data = &p->retrans_time;
2687 t->neigh_vars[4].data = &p->base_reachable_time;
2688 t->neigh_vars[5].data = &p->delay_probe_time;
2689 t->neigh_vars[6].data = &p->gc_staletime;
2690 t->neigh_vars[7].data = &p->queue_len;
2691 t->neigh_vars[8].data = &p->proxy_qlen;
2692 t->neigh_vars[9].data = &p->anycast_delay;
2693 t->neigh_vars[10].data = &p->proxy_delay;
2694 t->neigh_vars[11].data = &p->locktime;
2695 t->neigh_vars[12].data = &p->retrans_time;
2696 t->neigh_vars[13].data = &p->base_reachable_time;
2697
2698 if (dev) {
2699 dev_name_source = dev->name;
2700 neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
2701 /* Terminate the table early */
2702 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2703 } else {
2704 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2705 t->neigh_vars[14].data = (int *)(p + 1);
2706 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2707 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2708 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2709 }
2710
2711
2712 if (handler || strategy) {
2713 /* RetransTime */
2714 t->neigh_vars[3].proc_handler = handler;
2715 t->neigh_vars[3].strategy = strategy;
2716 t->neigh_vars[3].extra1 = dev;
2717 if (!strategy)
2718 t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2719 /* ReachableTime */
2720 t->neigh_vars[4].proc_handler = handler;
2721 t->neigh_vars[4].strategy = strategy;
2722 t->neigh_vars[4].extra1 = dev;
2723 if (!strategy)
2724 t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2725 /* RetransTime (in milliseconds) */
2726 t->neigh_vars[12].proc_handler = handler;
2727 t->neigh_vars[12].strategy = strategy;
2728 t->neigh_vars[12].extra1 = dev;
2729 if (!strategy)
2730 t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2731 /* ReachableTime (in milliseconds) */
2732 t->neigh_vars[13].proc_handler = handler;
2733 t->neigh_vars[13].strategy = strategy;
2734 t->neigh_vars[13].extra1 = dev;
2735 if (!strategy)
2736 t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
2737 }
2738
2739 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2740 if (!t->dev_name)
2741 goto free;
2742
2743 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2744 neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2745 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2746 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2747
2748 t->sysctl_header =
2749 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2750 if (!t->sysctl_header)
2751 goto free_procname;
2752
2753 p->sysctl_table = t;
2754 return 0;
2755
2756 free_procname:
2757 kfree(t->dev_name);
2758 free:
2759 kfree(t);
2760 err:
2761 return -ENOBUFS;
2762 }
2763
2764 void neigh_sysctl_unregister(struct neigh_parms *p)
2765 {
2766 if (p->sysctl_table) {
2767 struct neigh_sysctl_table *t = p->sysctl_table;
2768 p->sysctl_table = NULL;
2769 unregister_sysctl_table(t->sysctl_header);
2770 kfree(t->dev_name);
2771 kfree(t);
2772 }
2773 }
2774
2775 #endif /* CONFIG_SYSCTL */
2776
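/*
 * Register the rtnetlink handlers for neighbour entries and tables.
 * This runs at subsys_initcall time, before the protocol init calls
 * that create tables via neigh_table_init().
 */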
2777 static int __init neigh_init(void)
2778 {
2779 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2780 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2781 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2782
2783 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2784 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2785
2786 return 0;
2787 }
2788
2789 subsys_initcall(neigh_init);
2790
2791 EXPORT_SYMBOL(__neigh_event_send);
2792 EXPORT_SYMBOL(neigh_changeaddr);
2793 EXPORT_SYMBOL(neigh_compat_output);
2794 EXPORT_SYMBOL(neigh_connected_output);
2795 EXPORT_SYMBOL(neigh_create);
2796 EXPORT_SYMBOL(neigh_destroy);
2797 EXPORT_SYMBOL(neigh_event_ns);
2798 EXPORT_SYMBOL(neigh_ifdown);
2799 EXPORT_SYMBOL(neigh_lookup);
2800 EXPORT_SYMBOL(neigh_lookup_nodev);
2801 EXPORT_SYMBOL(neigh_parms_alloc);
2802 EXPORT_SYMBOL(neigh_parms_release);
2803 EXPORT_SYMBOL(neigh_rand_reach_time);
2804 EXPORT_SYMBOL(neigh_resolve_output);
2805 EXPORT_SYMBOL(neigh_table_clear);
2806 EXPORT_SYMBOL(neigh_table_init);
2807 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2808 EXPORT_SYMBOL(neigh_update);
2809 EXPORT_SYMBOL(pneigh_enqueue);
2810 EXPORT_SYMBOL(pneigh_lookup);
2811
2812 #ifdef CONFIG_ARPD
2813 EXPORT_SYMBOL(neigh_app_ns);
2814 #endif
2815 #ifdef CONFIG_SYSCTL
2816 EXPORT_SYMBOL(neigh_sysctl_register);
2817 EXPORT_SYMBOL(neigh_sysctl_unregister);
2818 #endif