net/core/neighbour.c ([NET]: Wrap hard_header_parse)
1 /*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
39 #define NEIGH_DEBUG 1
40
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
55
56 #define PNEIGH_HASHMASK 0xF
57
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
63
64 static struct neigh_table *neigh_tables;
65 #ifdef CONFIG_PROC_FS
66 static const struct file_operations neigh_stat_seq_fops;
67 #endif
68
69 /*
70 Neighbour hash table buckets are protected with rwlock tbl->lock.
71
72 - All the scans/updates to hash buckets MUST be made under this lock.
 73 - NOTHING clever should be done under this lock: no callbacks
 74 into protocol backends, no attempts to send anything to the network.
 75 Doing so will deadlock if the backend/driver wants to use the
 76 neighbour cache.
 77 - If the entry requires some non-trivial action, increase
 78 its reference count and release the table lock.
79
80 Neighbour entries are protected:
81 - with reference count.
82 - with rwlock neigh->lock
83
84 Reference count prevents destruction.
85
86 neigh->lock mainly serializes ll address data and its validity state.
 87 However, the same lock is used to protect other entry fields:
88 - timer
89 - resolution queue
90
 91 Again, nothing clever should be done under neigh->lock;
 92 the most complicated operation we allow is dev->hard_header.
 93 It is assumed that dev->hard_header is simple and does
 94 not call back into the neighbour tables.
95
 96 The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting the
 97 list of neighbour tables. This list is used only in process context.
98 */
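/*
   An illustrative sketch (just the rule above restated, roughly what
   neigh_lookup() and its callers do) of the "take a reference, then drop
   the table lock" pattern:

	read_lock_bh(&tbl->lock);
	n = <walk the hash bucket for a match>;
	if (n)
		neigh_hold(n);
	read_unlock_bh(&tbl->lock);
	<non-trivial work on n, no table lock held>
	neigh_release(n);
 */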
99
100 static DEFINE_RWLOCK(neigh_tbl_lock);
101
102 static int neigh_blackhole(struct sk_buff *skb)
103 {
104 kfree_skb(skb);
105 return -ENETDOWN;
106 }
107
108 static void neigh_cleanup_and_release(struct neighbour *neigh)
109 {
110 if (neigh->parms->neigh_cleanup)
111 neigh->parms->neigh_cleanup(neigh);
112
113 __neigh_notify(neigh, RTM_DELNEIGH, 0);
114 neigh_release(neigh);
115 }
116
117 /*
 118 * Returns a value uniformly distributed in the interval (1/2)*base...(3/2)*base.
 119 * It corresponds to the default IPv6 settings and is not overridable,
 120 * because it is a really reasonable choice.
121 */
122
123 unsigned long neigh_rand_reach_time(unsigned long base)
124 {
125 return (base ? (net_random() % base) + (base >> 1) : 0);
126 }
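/*
 * For example (illustrative): with base = 30 * HZ the value returned above
 * is uniformly distributed over [15 * HZ, 45 * HZ); base = 0 yields 0.
 */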
127
128
129 static int neigh_forced_gc(struct neigh_table *tbl)
130 {
131 int shrunk = 0;
132 int i;
133
134 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
135
136 write_lock_bh(&tbl->lock);
137 for (i = 0; i <= tbl->hash_mask; i++) {
138 struct neighbour *n, **np;
139
140 np = &tbl->hash_buckets[i];
141 while ((n = *np) != NULL) {
142 /* Neighbour record may be discarded if:
143 * - nobody refers to it.
144 * - it is not permanent
145 */
146 write_lock(&n->lock);
147 if (atomic_read(&n->refcnt) == 1 &&
148 !(n->nud_state & NUD_PERMANENT)) {
149 *np = n->next;
150 n->dead = 1;
151 shrunk = 1;
152 write_unlock(&n->lock);
153 neigh_cleanup_and_release(n);
154 continue;
155 }
156 write_unlock(&n->lock);
157 np = &n->next;
158 }
159 }
160
161 tbl->last_flush = jiffies;
162
163 write_unlock_bh(&tbl->lock);
164
165 return shrunk;
166 }
167
168 static int neigh_del_timer(struct neighbour *n)
169 {
170 if ((n->nud_state & NUD_IN_TIMER) &&
171 del_timer(&n->timer)) {
172 neigh_release(n);
173 return 1;
174 }
175 return 0;
176 }
177
178 static void pneigh_queue_purge(struct sk_buff_head *list)
179 {
180 struct sk_buff *skb;
181
182 while ((skb = skb_dequeue(list)) != NULL) {
183 dev_put(skb->dev);
184 kfree_skb(skb);
185 }
186 }
187
188 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
189 {
190 int i;
191
192 for (i = 0; i <= tbl->hash_mask; i++) {
193 struct neighbour *n, **np = &tbl->hash_buckets[i];
194
195 while ((n = *np) != NULL) {
196 if (dev && n->dev != dev) {
197 np = &n->next;
198 continue;
199 }
200 *np = n->next;
201 write_lock(&n->lock);
202 neigh_del_timer(n);
203 n->dead = 1;
204
205 if (atomic_read(&n->refcnt) != 1) {
206 /* The most unpleasant situation.
 207 We must destroy the neighbour entry,
208 but someone still uses it.
209
210 The destroy will be delayed until
211 the last user releases us, but
212 we must kill timers etc. and move
 213 it to a safe state.
214 */
215 skb_queue_purge(&n->arp_queue);
216 n->output = neigh_blackhole;
217 if (n->nud_state & NUD_VALID)
218 n->nud_state = NUD_NOARP;
219 else
220 n->nud_state = NUD_NONE;
221 NEIGH_PRINTK2("neigh %p is stray.\n", n);
222 }
223 write_unlock(&n->lock);
224 neigh_cleanup_and_release(n);
225 }
226 }
227 }
228
229 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
230 {
231 write_lock_bh(&tbl->lock);
232 neigh_flush_dev(tbl, dev);
233 write_unlock_bh(&tbl->lock);
234 }
235
236 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
237 {
238 write_lock_bh(&tbl->lock);
239 neigh_flush_dev(tbl, dev);
240 pneigh_ifdown(tbl, dev);
241 write_unlock_bh(&tbl->lock);
242
243 del_timer_sync(&tbl->proxy_timer);
244 pneigh_queue_purge(&tbl->proxy_queue);
245 return 0;
246 }
247
248 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
249 {
250 struct neighbour *n = NULL;
251 unsigned long now = jiffies;
252 int entries;
253
254 entries = atomic_inc_return(&tbl->entries) - 1;
255 if (entries >= tbl->gc_thresh3 ||
256 (entries >= tbl->gc_thresh2 &&
257 time_after(now, tbl->last_flush + 5 * HZ))) {
258 if (!neigh_forced_gc(tbl) &&
259 entries >= tbl->gc_thresh3)
260 goto out_entries;
261 }
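/* In other words (informal summary): above gc_thresh3, or above gc_thresh2
 * with no forced flush in the last 5 seconds, try a synchronous GC first;
 * the allocation only fails if that freed nothing and we are still at or
 * above gc_thresh3.
 */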
262
263 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
264 if (!n)
265 goto out_entries;
266
267 skb_queue_head_init(&n->arp_queue);
268 rwlock_init(&n->lock);
269 n->updated = n->used = now;
270 n->nud_state = NUD_NONE;
271 n->output = neigh_blackhole;
272 n->parms = neigh_parms_clone(&tbl->parms);
273 init_timer(&n->timer);
274 n->timer.function = neigh_timer_handler;
275 n->timer.data = (unsigned long)n;
276
277 NEIGH_CACHE_STAT_INC(tbl, allocs);
278 n->tbl = tbl;
279 atomic_set(&n->refcnt, 1);
280 n->dead = 1;
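/* Not hashed into the table yet; neigh_create() clears ->dead once the
 * entry is actually linked into a bucket.
 */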
281 out:
282 return n;
283
284 out_entries:
285 atomic_dec(&tbl->entries);
286 goto out;
287 }
288
289 static struct neighbour **neigh_hash_alloc(unsigned int entries)
290 {
291 unsigned long size = entries * sizeof(struct neighbour *);
292 struct neighbour **ret;
293
294 if (size <= PAGE_SIZE) {
295 ret = kzalloc(size, GFP_ATOMIC);
296 } else {
297 ret = (struct neighbour **)
298 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
299 }
300 return ret;
301 }
302
303 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
304 {
305 unsigned long size = entries * sizeof(struct neighbour *);
306
307 if (size <= PAGE_SIZE)
308 kfree(hash);
309 else
310 free_pages((unsigned long)hash, get_order(size));
311 }
312
313 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
314 {
315 struct neighbour **new_hash, **old_hash;
316 unsigned int i, new_hash_mask, old_entries;
317
318 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
319
320 BUG_ON(!is_power_of_2(new_entries));
321 new_hash = neigh_hash_alloc(new_entries);
322 if (!new_hash)
323 return;
324
325 old_entries = tbl->hash_mask + 1;
326 new_hash_mask = new_entries - 1;
327 old_hash = tbl->hash_buckets;
328
329 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
330 for (i = 0; i < old_entries; i++) {
331 struct neighbour *n, *next;
332
333 for (n = old_hash[i]; n; n = next) {
334 unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
335
336 hash_val &= new_hash_mask;
337 next = n->next;
338
339 n->next = new_hash[hash_val];
340 new_hash[hash_val] = n;
341 }
342 }
343 tbl->hash_buckets = new_hash;
344 tbl->hash_mask = new_hash_mask;
345
346 neigh_hash_free(old_hash, old_entries);
347 }
348
349 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
350 struct net_device *dev)
351 {
352 struct neighbour *n;
353 int key_len = tbl->key_len;
354 u32 hash_val = tbl->hash(pkey, dev);
355
356 NEIGH_CACHE_STAT_INC(tbl, lookups);
357
358 read_lock_bh(&tbl->lock);
359 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
360 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
361 neigh_hold(n);
362 NEIGH_CACHE_STAT_INC(tbl, hits);
363 break;
364 }
365 }
366 read_unlock_bh(&tbl->lock);
367 return n;
368 }
369
370 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, const void *pkey)
371 {
372 struct neighbour *n;
373 int key_len = tbl->key_len;
374 u32 hash_val = tbl->hash(pkey, NULL);
375
376 NEIGH_CACHE_STAT_INC(tbl, lookups);
377
378 read_lock_bh(&tbl->lock);
379 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
380 if (!memcmp(n->primary_key, pkey, key_len)) {
381 neigh_hold(n);
382 NEIGH_CACHE_STAT_INC(tbl, hits);
383 break;
384 }
385 }
386 read_unlock_bh(&tbl->lock);
387 return n;
388 }
389
390 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
391 struct net_device *dev)
392 {
393 u32 hash_val;
394 int key_len = tbl->key_len;
395 int error;
396 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
397
398 if (!n) {
399 rc = ERR_PTR(-ENOBUFS);
400 goto out;
401 }
402
403 memcpy(n->primary_key, pkey, key_len);
404 n->dev = dev;
405 dev_hold(dev);
406
407 /* Protocol specific setup. */
408 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
409 rc = ERR_PTR(error);
410 goto out_neigh_release;
411 }
412
413 /* Device specific setup. */
414 if (n->parms->neigh_setup &&
415 (error = n->parms->neigh_setup(n)) < 0) {
416 rc = ERR_PTR(error);
417 goto out_neigh_release;
418 }
419
420 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
421
422 write_lock_bh(&tbl->lock);
423
424 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
425 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
426
427 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
428
429 if (n->parms->dead) {
430 rc = ERR_PTR(-EINVAL);
431 goto out_tbl_unlock;
432 }
433
434 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
435 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
436 neigh_hold(n1);
437 rc = n1;
438 goto out_tbl_unlock;
439 }
440 }
441
442 n->next = tbl->hash_buckets[hash_val];
443 tbl->hash_buckets[hash_val] = n;
444 n->dead = 0;
445 neigh_hold(n);
446 write_unlock_bh(&tbl->lock);
447 NEIGH_PRINTK2("neigh %p is created.\n", n);
448 rc = n;
449 out:
450 return rc;
451 out_tbl_unlock:
452 write_unlock_bh(&tbl->lock);
453 out_neigh_release:
454 neigh_release(n);
455 goto out;
456 }
457
458 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
459 struct net_device *dev, int creat)
460 {
461 struct pneigh_entry *n;
462 int key_len = tbl->key_len;
463 u32 hash_val = *(u32 *)(pkey + key_len - 4);
464
465 hash_val ^= (hash_val >> 16);
466 hash_val ^= hash_val >> 8;
467 hash_val ^= hash_val >> 4;
468 hash_val &= PNEIGH_HASHMASK;
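/* The folding above reduces the last four bytes of the key to a 4-bit
 * bucket index in 0..PNEIGH_HASHMASK.
 */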
469
470 read_lock_bh(&tbl->lock);
471
472 for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
473 if (!memcmp(n->key, pkey, key_len) &&
474 (n->dev == dev || !n->dev)) {
475 read_unlock_bh(&tbl->lock);
476 goto out;
477 }
478 }
479 read_unlock_bh(&tbl->lock);
480 n = NULL;
481 if (!creat)
482 goto out;
483
484 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
485 if (!n)
486 goto out;
487
488 memcpy(n->key, pkey, key_len);
489 n->dev = dev;
490 if (dev)
491 dev_hold(dev);
492
493 if (tbl->pconstructor && tbl->pconstructor(n)) {
494 if (dev)
495 dev_put(dev);
496 kfree(n);
497 n = NULL;
498 goto out;
499 }
500
501 write_lock_bh(&tbl->lock);
502 n->next = tbl->phash_buckets[hash_val];
503 tbl->phash_buckets[hash_val] = n;
504 write_unlock_bh(&tbl->lock);
505 out:
506 return n;
507 }
508
509
510 int pneigh_delete(struct neigh_table *tbl, const void *pkey,
511 struct net_device *dev)
512 {
513 struct pneigh_entry *n, **np;
514 int key_len = tbl->key_len;
515 u32 hash_val = *(u32 *)(pkey + key_len - 4);
516
517 hash_val ^= (hash_val >> 16);
518 hash_val ^= hash_val >> 8;
519 hash_val ^= hash_val >> 4;
520 hash_val &= PNEIGH_HASHMASK;
521
522 write_lock_bh(&tbl->lock);
523 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
524 np = &n->next) {
525 if (!memcmp(n->key, pkey, key_len) && n->dev == dev) {
526 *np = n->next;
527 write_unlock_bh(&tbl->lock);
528 if (tbl->pdestructor)
529 tbl->pdestructor(n);
530 if (n->dev)
531 dev_put(n->dev);
532 kfree(n);
533 return 0;
534 }
535 }
536 write_unlock_bh(&tbl->lock);
537 return -ENOENT;
538 }
539
540 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
541 {
542 struct pneigh_entry *n, **np;
543 u32 h;
544
545 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
546 np = &tbl->phash_buckets[h];
547 while ((n = *np) != NULL) {
548 if (!dev || n->dev == dev) {
549 *np = n->next;
550 if (tbl->pdestructor)
551 tbl->pdestructor(n);
552 if (n->dev)
553 dev_put(n->dev);
554 kfree(n);
555 continue;
556 }
557 np = &n->next;
558 }
559 }
560 return -ENOENT;
561 }
562
563
564 /*
 565 * The neighbour entry must already be removed from the table.
566 *
567 */
568 void neigh_destroy(struct neighbour *neigh)
569 {
570 struct hh_cache *hh;
571
572 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
573
574 if (!neigh->dead) {
575 printk(KERN_WARNING
576 "Destroying alive neighbour %p\n", neigh);
577 dump_stack();
578 return;
579 }
580
581 if (neigh_del_timer(neigh))
582 printk(KERN_WARNING "Impossible event.\n");
583
584 while ((hh = neigh->hh) != NULL) {
585 neigh->hh = hh->hh_next;
586 hh->hh_next = NULL;
587
588 write_seqlock_bh(&hh->hh_lock);
589 hh->hh_output = neigh_blackhole;
590 write_sequnlock_bh(&hh->hh_lock);
591 if (atomic_dec_and_test(&hh->hh_refcnt))
592 kfree(hh);
593 }
594
595 skb_queue_purge(&neigh->arp_queue);
596
597 dev_put(neigh->dev);
598 neigh_parms_put(neigh->parms);
599
600 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
601
602 atomic_dec(&neigh->tbl->entries);
603 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
604 }
605
606 /* Neighbour state is suspicious;
607 disable fast path.
608
 609 Called with neigh->lock held for writing.
610 */
611 static void neigh_suspect(struct neighbour *neigh)
612 {
613 struct hh_cache *hh;
614
615 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
616
617 neigh->output = neigh->ops->output;
618
619 for (hh = neigh->hh; hh; hh = hh->hh_next)
620 hh->hh_output = neigh->ops->output;
621 }
622
623 /* Neighbour state is OK;
624 enable fast path.
625
 626 Called with neigh->lock held for writing.
627 */
628 static void neigh_connect(struct neighbour *neigh)
629 {
630 struct hh_cache *hh;
631
632 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
633
634 neigh->output = neigh->ops->connected_output;
635
636 for (hh = neigh->hh; hh; hh = hh->hh_next)
637 hh->hh_output = neigh->ops->hh_output;
638 }
639
640 static void neigh_periodic_timer(unsigned long arg)
641 {
642 struct neigh_table *tbl = (struct neigh_table *)arg;
643 struct neighbour *n, **np;
644 unsigned long expire, now = jiffies;
645
646 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
647
648 write_lock(&tbl->lock);
649
650 /*
651 * periodically recompute ReachableTime from random function
652 */
653
654 if (time_after(now, tbl->last_rand + 300 * HZ)) {
655 struct neigh_parms *p;
656 tbl->last_rand = now;
657 for (p = &tbl->parms; p; p = p->next)
658 p->reachable_time =
659 neigh_rand_reach_time(p->base_reachable_time);
660 }
661
662 np = &tbl->hash_buckets[tbl->hash_chain_gc];
663 tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
664
665 while ((n = *np) != NULL) {
666 unsigned int state;
667
668 write_lock(&n->lock);
669
670 state = n->nud_state;
671 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
672 write_unlock(&n->lock);
673 goto next_elt;
674 }
675
676 if (time_before(n->used, n->confirmed))
677 n->used = n->confirmed;
678
679 if (atomic_read(&n->refcnt) == 1 &&
680 (state == NUD_FAILED ||
681 time_after(now, n->used + n->parms->gc_staletime))) {
682 *np = n->next;
683 n->dead = 1;
684 write_unlock(&n->lock);
685 neigh_cleanup_and_release(n);
686 continue;
687 }
688 write_unlock(&n->lock);
689
690 next_elt:
691 np = &n->next;
692 }
693
694 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
695 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
696 * base_reachable_time.
697 */
698 expire = tbl->parms.base_reachable_time >> 1;
699 expire /= (tbl->hash_mask + 1);
700 if (!expire)
701 expire = 1;
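/* E.g. (illustrative): with base_reachable_time = 30 * HZ and 128 buckets,
 * expire = (15 * HZ) / 128, so one sweep over the whole table takes about
 * base_reachable_time / 2, as intended above.
 */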
702
 703 if (expire > HZ)
704 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
705 else
706 mod_timer(&tbl->gc_timer, now + expire);
707
708 write_unlock(&tbl->lock);
709 }
710
711 static __inline__ int neigh_max_probes(struct neighbour *n)
712 {
713 struct neigh_parms *p = n->parms;
714 return (n->nud_state & NUD_PROBE ?
715 p->ucast_probes :
716 p->ucast_probes + p->app_probes + p->mcast_probes);
717 }
718
719 static inline void neigh_add_timer(struct neighbour *n, unsigned long when)
720 {
721 if (unlikely(mod_timer(&n->timer, when))) {
722 printk("NEIGH: BUG, double timer add, state is %x\n",
723 n->nud_state);
724 dump_stack();
725 }
726 }
727
728 /* Called when a timer expires for a neighbour entry. */
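/* Rough summary of the NUD transitions performed below (the code holds the
   exact timing rules):

	REACHABLE -> stays REACHABLE while recently confirmed, otherwise
	             goes to DELAY (recently used) or STALE (idle).
	DELAY     -> back to REACHABLE if confirmed in the meantime,
	             otherwise to PROBE.
	PROBE / INCOMPLETE -> FAILED once neigh_max_probes() probes have
	             been sent without an answer.
 */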
729
730 static void neigh_timer_handler(unsigned long arg)
731 {
732 unsigned long now, next;
733 struct neighbour *neigh = (struct neighbour *)arg;
734 unsigned state;
735 int notify = 0;
736
737 write_lock(&neigh->lock);
738
739 state = neigh->nud_state;
740 now = jiffies;
741 next = now + HZ;
742
743 if (!(state & NUD_IN_TIMER)) {
744 #ifndef CONFIG_SMP
745 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
746 #endif
747 goto out;
748 }
749
750 if (state & NUD_REACHABLE) {
751 if (time_before_eq(now,
752 neigh->confirmed + neigh->parms->reachable_time)) {
753 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
754 next = neigh->confirmed + neigh->parms->reachable_time;
755 } else if (time_before_eq(now,
756 neigh->used + neigh->parms->delay_probe_time)) {
757 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
758 neigh->nud_state = NUD_DELAY;
759 neigh->updated = jiffies;
760 neigh_suspect(neigh);
761 next = now + neigh->parms->delay_probe_time;
762 } else {
763 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
764 neigh->nud_state = NUD_STALE;
765 neigh->updated = jiffies;
766 neigh_suspect(neigh);
767 notify = 1;
768 }
769 } else if (state & NUD_DELAY) {
770 if (time_before_eq(now,
771 neigh->confirmed + neigh->parms->delay_probe_time)) {
772 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
773 neigh->nud_state = NUD_REACHABLE;
774 neigh->updated = jiffies;
775 neigh_connect(neigh);
776 notify = 1;
777 next = neigh->confirmed + neigh->parms->reachable_time;
778 } else {
779 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
780 neigh->nud_state = NUD_PROBE;
781 neigh->updated = jiffies;
782 atomic_set(&neigh->probes, 0);
783 next = now + neigh->parms->retrans_time;
784 }
785 } else {
786 /* NUD_PROBE|NUD_INCOMPLETE */
787 next = now + neigh->parms->retrans_time;
788 }
789
790 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
791 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
792 struct sk_buff *skb;
793
794 neigh->nud_state = NUD_FAILED;
795 neigh->updated = jiffies;
796 notify = 1;
797 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
798 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
799
 800 /* This is a very delicate spot. report_unreachable is a complicated
 801 routine. In particular, it can hit the same neighbour entry!
 802
 803 So we try to be careful and avoid a dead loop. --ANK
804 */
805 while (neigh->nud_state == NUD_FAILED &&
806 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
807 write_unlock(&neigh->lock);
808 neigh->ops->error_report(neigh, skb);
809 write_lock(&neigh->lock);
810 }
811 skb_queue_purge(&neigh->arp_queue);
812 }
813
814 if (neigh->nud_state & NUD_IN_TIMER) {
815 if (time_before(next, jiffies + HZ/2))
816 next = jiffies + HZ/2;
817 if (!mod_timer(&neigh->timer, next))
818 neigh_hold(neigh);
819 }
820 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
821 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
822 /* keep skb alive even if arp_queue overflows */
823 if (skb)
824 skb_get(skb);
825 write_unlock(&neigh->lock);
826 neigh->ops->solicit(neigh, skb);
827 atomic_inc(&neigh->probes);
828 if (skb)
829 kfree_skb(skb);
830 } else {
831 out:
832 write_unlock(&neigh->lock);
833 }
834
835 if (notify)
836 neigh_update_notify(neigh);
837
838 neigh_release(neigh);
839 }
840
841 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
842 {
843 int rc;
844 unsigned long now;
845
846 write_lock_bh(&neigh->lock);
847
848 rc = 0;
849 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
850 goto out_unlock_bh;
851
852 now = jiffies;
853
854 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
855 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
856 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
857 neigh->nud_state = NUD_INCOMPLETE;
858 neigh->updated = jiffies;
859 neigh_hold(neigh);
860 neigh_add_timer(neigh, now + 1);
861 } else {
862 neigh->nud_state = NUD_FAILED;
863 neigh->updated = jiffies;
864 write_unlock_bh(&neigh->lock);
865
866 if (skb)
867 kfree_skb(skb);
868 return 1;
869 }
870 } else if (neigh->nud_state & NUD_STALE) {
871 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
872 neigh_hold(neigh);
873 neigh->nud_state = NUD_DELAY;
874 neigh->updated = jiffies;
875 neigh_add_timer(neigh,
876 jiffies + neigh->parms->delay_probe_time);
877 }
878
879 if (neigh->nud_state == NUD_INCOMPLETE) {
880 if (skb) {
881 if (skb_queue_len(&neigh->arp_queue) >=
882 neigh->parms->queue_len) {
883 struct sk_buff *buff;
884 buff = neigh->arp_queue.next;
885 __skb_unlink(buff, &neigh->arp_queue);
886 kfree_skb(buff);
887 }
888 __skb_queue_tail(&neigh->arp_queue, skb);
889 }
890 rc = 1;
891 }
892 out_unlock_bh:
893 write_unlock_bh(&neigh->lock);
894 return rc;
895 }
896
897 static void neigh_update_hhs(struct neighbour *neigh)
898 {
899 struct hh_cache *hh;
900 void (*update)(struct hh_cache*, struct net_device*, unsigned char *) =
901 neigh->dev->header_cache_update;
902
903 if (update) {
904 for (hh = neigh->hh; hh; hh = hh->hh_next) {
905 write_seqlock_bh(&hh->hh_lock);
906 update(hh, neigh->dev, neigh->ha);
907 write_sequnlock_bh(&hh->hh_lock);
908 }
909 }
910 }
911
912
913
914 /* Generic update routine.
 915 -- lladdr is the new lladdr, or NULL if it is not supplied.
 916 -- new is the new state.
 917 -- flags
 918 NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr
 919 if it is different.
 920 NEIGH_UPDATE_F_WEAK_OVERRIDE will mark an existing "connected"
 921 lladdr suspect instead of overriding it
 922 if it is different.
 923 It also allows the current state to be retained
 924 if the lladdr is unchanged.
 925 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
 926
 927 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding the existing
 928 NTF_ROUTER flag.
 929 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known as
 930 a router.
 931
 932 Caller MUST hold a reference on the entry.
933 */
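/* Illustrative callers elsewhere in this file:
	neigh_event_ns(): neigh_update(neigh, lladdr, NUD_STALE,
					NEIGH_UPDATE_F_OVERRIDE);
	neigh_delete():   neigh_update(neigh, NULL, NUD_FAILED,
					NEIGH_UPDATE_F_OVERRIDE |
					NEIGH_UPDATE_F_ADMIN);
 */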
934
935 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
936 u32 flags)
937 {
938 u8 old;
939 int err;
940 int notify = 0;
941 struct net_device *dev;
942 int update_isrouter = 0;
943
944 write_lock_bh(&neigh->lock);
945
946 dev = neigh->dev;
947 old = neigh->nud_state;
948 err = -EPERM;
949
950 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
951 (old & (NUD_NOARP | NUD_PERMANENT)))
952 goto out;
953
954 if (!(new & NUD_VALID)) {
955 neigh_del_timer(neigh);
956 if (old & NUD_CONNECTED)
957 neigh_suspect(neigh);
958 neigh->nud_state = new;
959 err = 0;
960 notify = old & NUD_VALID;
961 goto out;
962 }
963
964 /* Compare new lladdr with cached one */
965 if (!dev->addr_len) {
966 /* First case: device needs no address. */
967 lladdr = neigh->ha;
968 } else if (lladdr) {
969 /* The second case: if something is already cached
970 and a new address is proposed:
971 - compare new & old
972 - if they are different, check override flag
973 */
974 if ((old & NUD_VALID) &&
975 !memcmp(lladdr, neigh->ha, dev->addr_len))
976 lladdr = neigh->ha;
977 } else {
978 /* No address is supplied; if we know something,
979 use it, otherwise discard the request.
980 */
981 err = -EINVAL;
982 if (!(old & NUD_VALID))
983 goto out;
984 lladdr = neigh->ha;
985 }
986
987 if (new & NUD_CONNECTED)
988 neigh->confirmed = jiffies;
989 neigh->updated = jiffies;
990
 991 /* If the entry was valid and the address has not changed,
 992 do not change the entry state if the new one is STALE.
993 */
994 err = 0;
995 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
996 if (old & NUD_VALID) {
997 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
998 update_isrouter = 0;
999 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1000 (old & NUD_CONNECTED)) {
1001 lladdr = neigh->ha;
1002 new = NUD_STALE;
1003 } else
1004 goto out;
1005 } else {
1006 if (lladdr == neigh->ha && new == NUD_STALE &&
1007 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1008 (old & NUD_CONNECTED))
1009 )
1010 new = old;
1011 }
1012 }
1013
1014 if (new != old) {
1015 neigh_del_timer(neigh);
1016 if (new & NUD_IN_TIMER) {
1017 neigh_hold(neigh);
1018 neigh_add_timer(neigh, (jiffies +
1019 ((new & NUD_REACHABLE) ?
1020 neigh->parms->reachable_time :
1021 0)));
1022 }
1023 neigh->nud_state = new;
1024 }
1025
1026 if (lladdr != neigh->ha) {
1027 memcpy(&neigh->ha, lladdr, dev->addr_len);
1028 neigh_update_hhs(neigh);
1029 if (!(new & NUD_CONNECTED))
1030 neigh->confirmed = jiffies -
1031 (neigh->parms->base_reachable_time << 1);
1032 notify = 1;
1033 }
1034 if (new == old)
1035 goto out;
1036 if (new & NUD_CONNECTED)
1037 neigh_connect(neigh);
1038 else
1039 neigh_suspect(neigh);
1040 if (!(old & NUD_VALID)) {
1041 struct sk_buff *skb;
1042
1043 /* Again: avoid dead loop if something went wrong */
1044
1045 while (neigh->nud_state & NUD_VALID &&
1046 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1047 struct neighbour *n1 = neigh;
1048 write_unlock_bh(&neigh->lock);
1049 /* On shaper/eql skb->dst->neighbour != neigh :( */
1050 if (skb->dst && skb->dst->neighbour)
1051 n1 = skb->dst->neighbour;
1052 n1->output(skb);
1053 write_lock_bh(&neigh->lock);
1054 }
1055 skb_queue_purge(&neigh->arp_queue);
1056 }
1057 out:
1058 if (update_isrouter) {
1059 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1060 (neigh->flags | NTF_ROUTER) :
1061 (neigh->flags & ~NTF_ROUTER);
1062 }
1063 write_unlock_bh(&neigh->lock);
1064
1065 if (notify)
1066 neigh_update_notify(neigh);
1067
1068 return err;
1069 }
1070
1071 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1072 u8 *lladdr, void *saddr,
1073 struct net_device *dev)
1074 {
1075 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1076 lladdr || !dev->addr_len);
1077 if (neigh)
1078 neigh_update(neigh, lladdr, NUD_STALE,
1079 NEIGH_UPDATE_F_OVERRIDE);
1080 return neigh;
1081 }
1082
1083 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1084 __be16 protocol)
1085 {
1086 struct hh_cache *hh;
1087 struct net_device *dev = dst->dev;
1088
1089 for (hh = n->hh; hh; hh = hh->hh_next)
1090 if (hh->hh_type == protocol)
1091 break;
1092
1093 if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1094 seqlock_init(&hh->hh_lock);
1095 hh->hh_type = protocol;
1096 atomic_set(&hh->hh_refcnt, 0);
1097 hh->hh_next = NULL;
1098 if (dev->hard_header_cache(n, hh)) {
1099 kfree(hh);
1100 hh = NULL;
1101 } else {
1102 atomic_inc(&hh->hh_refcnt);
1103 hh->hh_next = n->hh;
1104 n->hh = hh;
1105 if (n->nud_state & NUD_CONNECTED)
1106 hh->hh_output = n->ops->hh_output;
1107 else
1108 hh->hh_output = n->ops->output;
1109 }
1110 }
1111 if (hh) {
1112 atomic_inc(&hh->hh_refcnt);
1113 dst->hh = hh;
1114 }
1115 }
1116
 1117 /* This function can be used in contexts where only the old dev_queue_xmit
 1118 worked, e.g. if you want to override the normal output path (eql, shaper)
 1119 but resolution has not been done yet.
1120 */
1121
1122 int neigh_compat_output(struct sk_buff *skb)
1123 {
1124 struct net_device *dev = skb->dev;
1125
1126 __skb_pull(skb, skb_network_offset(skb));
1127
1128 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1129 skb->len) < 0 &&
1130 dev->rebuild_header(skb))
1131 return 0;
1132
1133 return dev_queue_xmit(skb);
1134 }
1135
1136 /* Slow and careful. */
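/* If neigh_event_send() returns non-zero below, the skb has already been
 * taken over by the neighbour layer (queued on arp_queue, or dropped
 * because the entry went to NUD_FAILED), so we just return 0.
 */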
1137
1138 int neigh_resolve_output(struct sk_buff *skb)
1139 {
1140 struct dst_entry *dst = skb->dst;
1141 struct neighbour *neigh;
1142 int rc = 0;
1143
1144 if (!dst || !(neigh = dst->neighbour))
1145 goto discard;
1146
1147 __skb_pull(skb, skb_network_offset(skb));
1148
1149 if (!neigh_event_send(neigh, skb)) {
1150 int err;
1151 struct net_device *dev = neigh->dev;
1152 if (dev->hard_header_cache && !dst->hh) {
1153 write_lock_bh(&neigh->lock);
1154 if (!dst->hh)
1155 neigh_hh_init(neigh, dst, dst->ops->protocol);
1156 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1157 neigh->ha, NULL, skb->len);
1158 write_unlock_bh(&neigh->lock);
1159 } else {
1160 read_lock_bh(&neigh->lock);
1161 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1162 neigh->ha, NULL, skb->len);
1163 read_unlock_bh(&neigh->lock);
1164 }
1165 if (err >= 0)
1166 rc = neigh->ops->queue_xmit(skb);
1167 else
1168 goto out_kfree_skb;
1169 }
1170 out:
1171 return rc;
1172 discard:
1173 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1174 dst, dst ? dst->neighbour : NULL);
1175 out_kfree_skb:
1176 rc = -EINVAL;
1177 kfree_skb(skb);
1178 goto out;
1179 }
1180
1181 /* As fast as possible without hh cache */
1182
1183 int neigh_connected_output(struct sk_buff *skb)
1184 {
1185 int err;
1186 struct dst_entry *dst = skb->dst;
1187 struct neighbour *neigh = dst->neighbour;
1188 struct net_device *dev = neigh->dev;
1189
1190 __skb_pull(skb, skb_network_offset(skb));
1191
1192 read_lock_bh(&neigh->lock);
1193 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1194 neigh->ha, NULL, skb->len);
1195 read_unlock_bh(&neigh->lock);
1196 if (err >= 0)
1197 err = neigh->ops->queue_xmit(skb);
1198 else {
1199 err = -EINVAL;
1200 kfree_skb(skb);
1201 }
1202 return err;
1203 }
1204
1205 static void neigh_proxy_process(unsigned long arg)
1206 {
1207 struct neigh_table *tbl = (struct neigh_table *)arg;
1208 long sched_next = 0;
1209 unsigned long now = jiffies;
1210 struct sk_buff *skb;
1211
1212 spin_lock(&tbl->proxy_queue.lock);
1213
1214 skb = tbl->proxy_queue.next;
1215
1216 while (skb != (struct sk_buff *)&tbl->proxy_queue) {
1217 struct sk_buff *back = skb;
1218 long tdif = NEIGH_CB(back)->sched_next - now;
1219
1220 skb = skb->next;
1221 if (tdif <= 0) {
1222 struct net_device *dev = back->dev;
1223 __skb_unlink(back, &tbl->proxy_queue);
1224 if (tbl->proxy_redo && netif_running(dev))
1225 tbl->proxy_redo(back);
1226 else
1227 kfree_skb(back);
1228
1229 dev_put(dev);
1230 } else if (!sched_next || tdif < sched_next)
1231 sched_next = tdif;
1232 }
1233 del_timer(&tbl->proxy_timer);
1234 if (sched_next)
1235 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1236 spin_unlock(&tbl->proxy_queue.lock);
1237 }
1238
1239 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1240 struct sk_buff *skb)
1241 {
1242 unsigned long now = jiffies;
1243 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1244
1245 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1246 kfree_skb(skb);
1247 return;
1248 }
1249
1250 NEIGH_CB(skb)->sched_next = sched_next;
1251 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1252
1253 spin_lock(&tbl->proxy_queue.lock);
1254 if (del_timer(&tbl->proxy_timer)) {
1255 if (time_before(tbl->proxy_timer.expires, sched_next))
1256 sched_next = tbl->proxy_timer.expires;
1257 }
1258 dst_release(skb->dst);
1259 skb->dst = NULL;
1260 dev_hold(skb->dev);
1261 __skb_queue_tail(&tbl->proxy_queue, skb);
1262 mod_timer(&tbl->proxy_timer, sched_next);
1263 spin_unlock(&tbl->proxy_queue.lock);
1264 }
1265
1266
1267 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1268 struct neigh_table *tbl)
1269 {
1270 struct neigh_parms *p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1271
1272 if (p) {
1273 p->tbl = tbl;
1274 atomic_set(&p->refcnt, 1);
1275 INIT_RCU_HEAD(&p->rcu_head);
1276 p->reachable_time =
1277 neigh_rand_reach_time(p->base_reachable_time);
1278 if (dev) {
1279 if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1280 kfree(p);
1281 return NULL;
1282 }
1283
1284 dev_hold(dev);
1285 p->dev = dev;
1286 }
1287 p->sysctl_table = NULL;
1288 write_lock_bh(&tbl->lock);
1289 p->next = tbl->parms.next;
1290 tbl->parms.next = p;
1291 write_unlock_bh(&tbl->lock);
1292 }
1293 return p;
1294 }
1295
1296 static void neigh_rcu_free_parms(struct rcu_head *head)
1297 {
1298 struct neigh_parms *parms =
1299 container_of(head, struct neigh_parms, rcu_head);
1300
1301 neigh_parms_put(parms);
1302 }
1303
1304 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1305 {
1306 struct neigh_parms **p;
1307
1308 if (!parms || parms == &tbl->parms)
1309 return;
1310 write_lock_bh(&tbl->lock);
1311 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1312 if (*p == parms) {
1313 *p = parms->next;
1314 parms->dead = 1;
1315 write_unlock_bh(&tbl->lock);
1316 if (parms->dev)
1317 dev_put(parms->dev);
1318 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1319 return;
1320 }
1321 }
1322 write_unlock_bh(&tbl->lock);
1323 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1324 }
1325
1326 void neigh_parms_destroy(struct neigh_parms *parms)
1327 {
1328 kfree(parms);
1329 }
1330
1331 static struct lock_class_key neigh_table_proxy_queue_class;
1332
1333 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1334 {
1335 unsigned long now = jiffies;
1336 unsigned long phsize;
1337
1338 atomic_set(&tbl->parms.refcnt, 1);
1339 INIT_RCU_HEAD(&tbl->parms.rcu_head);
1340 tbl->parms.reachable_time =
1341 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1342
1343 if (!tbl->kmem_cachep)
1344 tbl->kmem_cachep =
1345 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1346 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1347 NULL);
1348 tbl->stats = alloc_percpu(struct neigh_statistics);
1349 if (!tbl->stats)
1350 panic("cannot create neighbour cache statistics");
1351
1352 #ifdef CONFIG_PROC_FS
1353 tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat);
1354 if (!tbl->pde)
1355 panic("cannot create neighbour proc dir entry");
1356 tbl->pde->proc_fops = &neigh_stat_seq_fops;
1357 tbl->pde->data = tbl;
1358 #endif
1359
1360 tbl->hash_mask = 1;
1361 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1362
1363 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1364 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1365
1366 if (!tbl->hash_buckets || !tbl->phash_buckets)
1367 panic("cannot allocate neighbour cache hashes");
1368
1369 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1370
1371 rwlock_init(&tbl->lock);
1372 init_timer(&tbl->gc_timer);
1373 tbl->gc_timer.data = (unsigned long)tbl;
1374 tbl->gc_timer.function = neigh_periodic_timer;
1375 tbl->gc_timer.expires = now + 1;
1376 add_timer(&tbl->gc_timer);
1377
1378 init_timer(&tbl->proxy_timer);
1379 tbl->proxy_timer.data = (unsigned long)tbl;
1380 tbl->proxy_timer.function = neigh_proxy_process;
1381 skb_queue_head_init_class(&tbl->proxy_queue,
1382 &neigh_table_proxy_queue_class);
1383
1384 tbl->last_flush = now;
1385 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1386 }
1387
1388 void neigh_table_init(struct neigh_table *tbl)
1389 {
1390 struct neigh_table *tmp;
1391
1392 neigh_table_init_no_netlink(tbl);
1393 write_lock(&neigh_tbl_lock);
1394 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1395 if (tmp->family == tbl->family)
1396 break;
1397 }
1398 tbl->next = neigh_tables;
1399 neigh_tables = tbl;
1400 write_unlock(&neigh_tbl_lock);
1401
1402 if (unlikely(tmp)) {
1403 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1404 "family %d\n", tbl->family);
1405 dump_stack();
1406 }
1407 }
1408
1409 int neigh_table_clear(struct neigh_table *tbl)
1410 {
1411 struct neigh_table **tp;
1412
 1413 /* It is not clean... Fix it so the IPv6 module can be unloaded safely */
1414 del_timer_sync(&tbl->gc_timer);
1415 del_timer_sync(&tbl->proxy_timer);
1416 pneigh_queue_purge(&tbl->proxy_queue);
1417 neigh_ifdown(tbl, NULL);
1418 if (atomic_read(&tbl->entries))
1419 printk(KERN_CRIT "neighbour leakage\n");
1420 write_lock(&neigh_tbl_lock);
1421 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1422 if (*tp == tbl) {
1423 *tp = tbl->next;
1424 break;
1425 }
1426 }
1427 write_unlock(&neigh_tbl_lock);
1428
1429 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1430 tbl->hash_buckets = NULL;
1431
1432 kfree(tbl->phash_buckets);
1433 tbl->phash_buckets = NULL;
1434
1435 free_percpu(tbl->stats);
1436 tbl->stats = NULL;
1437
1438 return 0;
1439 }
1440
1441 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1442 {
1443 struct net *net = skb->sk->sk_net;
1444 struct ndmsg *ndm;
1445 struct nlattr *dst_attr;
1446 struct neigh_table *tbl;
1447 struct net_device *dev = NULL;
1448 int err = -EINVAL;
1449
1450 if (nlmsg_len(nlh) < sizeof(*ndm))
1451 goto out;
1452
1453 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1454 if (dst_attr == NULL)
1455 goto out;
1456
1457 ndm = nlmsg_data(nlh);
1458 if (ndm->ndm_ifindex) {
1459 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1460 if (dev == NULL) {
1461 err = -ENODEV;
1462 goto out;
1463 }
1464 }
1465
1466 read_lock(&neigh_tbl_lock);
1467 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1468 struct neighbour *neigh;
1469
1470 if (tbl->family != ndm->ndm_family)
1471 continue;
1472 read_unlock(&neigh_tbl_lock);
1473
1474 if (nla_len(dst_attr) < tbl->key_len)
1475 goto out_dev_put;
1476
1477 if (ndm->ndm_flags & NTF_PROXY) {
1478 err = pneigh_delete(tbl, nla_data(dst_attr), dev);
1479 goto out_dev_put;
1480 }
1481
1482 if (dev == NULL)
1483 goto out_dev_put;
1484
1485 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1486 if (neigh == NULL) {
1487 err = -ENOENT;
1488 goto out_dev_put;
1489 }
1490
1491 err = neigh_update(neigh, NULL, NUD_FAILED,
1492 NEIGH_UPDATE_F_OVERRIDE |
1493 NEIGH_UPDATE_F_ADMIN);
1494 neigh_release(neigh);
1495 goto out_dev_put;
1496 }
1497 read_unlock(&neigh_tbl_lock);
1498 err = -EAFNOSUPPORT;
1499
1500 out_dev_put:
1501 if (dev)
1502 dev_put(dev);
1503 out:
1504 return err;
1505 }
1506
1507 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1508 {
1509 struct net *net = skb->sk->sk_net;
1510 struct ndmsg *ndm;
1511 struct nlattr *tb[NDA_MAX+1];
1512 struct neigh_table *tbl;
1513 struct net_device *dev = NULL;
1514 int err;
1515
1516 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1517 if (err < 0)
1518 goto out;
1519
1520 err = -EINVAL;
1521 if (tb[NDA_DST] == NULL)
1522 goto out;
1523
1524 ndm = nlmsg_data(nlh);
1525 if (ndm->ndm_ifindex) {
1526 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1527 if (dev == NULL) {
1528 err = -ENODEV;
1529 goto out;
1530 }
1531
1532 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1533 goto out_dev_put;
1534 }
1535
1536 read_lock(&neigh_tbl_lock);
1537 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1538 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1539 struct neighbour *neigh;
1540 void *dst, *lladdr;
1541
1542 if (tbl->family != ndm->ndm_family)
1543 continue;
1544 read_unlock(&neigh_tbl_lock);
1545
1546 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1547 goto out_dev_put;
1548 dst = nla_data(tb[NDA_DST]);
1549 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1550
1551 if (ndm->ndm_flags & NTF_PROXY) {
1552 struct pneigh_entry *pn;
1553
1554 err = -ENOBUFS;
1555 pn = pneigh_lookup(tbl, dst, dev, 1);
1556 if (pn) {
1557 pn->flags = ndm->ndm_flags;
1558 err = 0;
1559 }
1560 goto out_dev_put;
1561 }
1562
1563 if (dev == NULL)
1564 goto out_dev_put;
1565
1566 neigh = neigh_lookup(tbl, dst, dev);
1567 if (neigh == NULL) {
1568 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1569 err = -ENOENT;
1570 goto out_dev_put;
1571 }
1572
1573 neigh = __neigh_lookup_errno(tbl, dst, dev);
1574 if (IS_ERR(neigh)) {
1575 err = PTR_ERR(neigh);
1576 goto out_dev_put;
1577 }
1578 } else {
1579 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1580 err = -EEXIST;
1581 neigh_release(neigh);
1582 goto out_dev_put;
1583 }
1584
1585 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1586 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1587 }
1588
1589 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1590 neigh_release(neigh);
1591 goto out_dev_put;
1592 }
1593
1594 read_unlock(&neigh_tbl_lock);
1595 err = -EAFNOSUPPORT;
1596
1597 out_dev_put:
1598 if (dev)
1599 dev_put(dev);
1600 out:
1601 return err;
1602 }
1603
1604 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1605 {
1606 struct nlattr *nest;
1607
1608 nest = nla_nest_start(skb, NDTA_PARMS);
1609 if (nest == NULL)
1610 return -ENOBUFS;
1611
1612 if (parms->dev)
1613 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1614
1615 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1616 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1617 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1618 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1619 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1620 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1621 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1622 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1623 parms->base_reachable_time);
1624 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1625 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1626 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1627 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1628 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1629 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1630
1631 return nla_nest_end(skb, nest);
1632
1633 nla_put_failure:
1634 return nla_nest_cancel(skb, nest);
1635 }
1636
1637 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1638 u32 pid, u32 seq, int type, int flags)
1639 {
1640 struct nlmsghdr *nlh;
1641 struct ndtmsg *ndtmsg;
1642
1643 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1644 if (nlh == NULL)
1645 return -EMSGSIZE;
1646
1647 ndtmsg = nlmsg_data(nlh);
1648
1649 read_lock_bh(&tbl->lock);
1650 ndtmsg->ndtm_family = tbl->family;
1651 ndtmsg->ndtm_pad1 = 0;
1652 ndtmsg->ndtm_pad2 = 0;
1653
1654 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1655 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1656 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1657 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1658 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1659
1660 {
1661 unsigned long now = jiffies;
1662 unsigned int flush_delta = now - tbl->last_flush;
1663 unsigned int rand_delta = now - tbl->last_rand;
1664
1665 struct ndt_config ndc = {
1666 .ndtc_key_len = tbl->key_len,
1667 .ndtc_entry_size = tbl->entry_size,
1668 .ndtc_entries = atomic_read(&tbl->entries),
1669 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1670 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1671 .ndtc_hash_rnd = tbl->hash_rnd,
1672 .ndtc_hash_mask = tbl->hash_mask,
1673 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1674 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1675 };
1676
1677 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1678 }
1679
1680 {
1681 int cpu;
1682 struct ndt_stats ndst;
1683
1684 memset(&ndst, 0, sizeof(ndst));
1685
1686 for_each_possible_cpu(cpu) {
1687 struct neigh_statistics *st;
1688
1689 st = per_cpu_ptr(tbl->stats, cpu);
1690 ndst.ndts_allocs += st->allocs;
1691 ndst.ndts_destroys += st->destroys;
1692 ndst.ndts_hash_grows += st->hash_grows;
1693 ndst.ndts_res_failed += st->res_failed;
1694 ndst.ndts_lookups += st->lookups;
1695 ndst.ndts_hits += st->hits;
1696 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1697 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1698 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1699 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1700 }
1701
1702 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1703 }
1704
1705 BUG_ON(tbl->parms.dev);
1706 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1707 goto nla_put_failure;
1708
1709 read_unlock_bh(&tbl->lock);
1710 return nlmsg_end(skb, nlh);
1711
1712 nla_put_failure:
1713 read_unlock_bh(&tbl->lock);
1714 nlmsg_cancel(skb, nlh);
1715 return -EMSGSIZE;
1716 }
1717
1718 static int neightbl_fill_param_info(struct sk_buff *skb,
1719 struct neigh_table *tbl,
1720 struct neigh_parms *parms,
1721 u32 pid, u32 seq, int type,
1722 unsigned int flags)
1723 {
1724 struct ndtmsg *ndtmsg;
1725 struct nlmsghdr *nlh;
1726
1727 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1728 if (nlh == NULL)
1729 return -EMSGSIZE;
1730
1731 ndtmsg = nlmsg_data(nlh);
1732
1733 read_lock_bh(&tbl->lock);
1734 ndtmsg->ndtm_family = tbl->family;
1735 ndtmsg->ndtm_pad1 = 0;
1736 ndtmsg->ndtm_pad2 = 0;
1737
1738 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1739 neightbl_fill_parms(skb, parms) < 0)
1740 goto errout;
1741
1742 read_unlock_bh(&tbl->lock);
1743 return nlmsg_end(skb, nlh);
1744 errout:
1745 read_unlock_bh(&tbl->lock);
1746 nlmsg_cancel(skb, nlh);
1747 return -EMSGSIZE;
1748 }
1749
1750 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1751 int ifindex)
1752 {
1753 struct neigh_parms *p;
1754
1755 for (p = &tbl->parms; p; p = p->next)
1756 if ((p->dev && p->dev->ifindex == ifindex) ||
1757 (!p->dev && !ifindex))
1758 return p;
1759
1760 return NULL;
1761 }
1762
1763 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1764 [NDTA_NAME] = { .type = NLA_STRING },
1765 [NDTA_THRESH1] = { .type = NLA_U32 },
1766 [NDTA_THRESH2] = { .type = NLA_U32 },
1767 [NDTA_THRESH3] = { .type = NLA_U32 },
1768 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1769 [NDTA_PARMS] = { .type = NLA_NESTED },
1770 };
1771
1772 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1773 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1774 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1775 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1776 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1777 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1778 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1779 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1780 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1781 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1782 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1783 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1784 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1785 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1786 };
1787
1788 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1789 {
1790 struct neigh_table *tbl;
1791 struct ndtmsg *ndtmsg;
1792 struct nlattr *tb[NDTA_MAX+1];
1793 int err;
1794
1795 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1796 nl_neightbl_policy);
1797 if (err < 0)
1798 goto errout;
1799
1800 if (tb[NDTA_NAME] == NULL) {
1801 err = -EINVAL;
1802 goto errout;
1803 }
1804
1805 ndtmsg = nlmsg_data(nlh);
1806 read_lock(&neigh_tbl_lock);
1807 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1808 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1809 continue;
1810
1811 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1812 break;
1813 }
1814
1815 if (tbl == NULL) {
1816 err = -ENOENT;
1817 goto errout_locked;
1818 }
1819
1820 /*
1821 * We acquire tbl->lock to be nice to the periodic timers and
1822 * make sure they always see a consistent set of values.
1823 */
1824 write_lock_bh(&tbl->lock);
1825
1826 if (tb[NDTA_PARMS]) {
1827 struct nlattr *tbp[NDTPA_MAX+1];
1828 struct neigh_parms *p;
1829 int i, ifindex = 0;
1830
1831 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1832 nl_ntbl_parm_policy);
1833 if (err < 0)
1834 goto errout_tbl_lock;
1835
1836 if (tbp[NDTPA_IFINDEX])
1837 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1838
1839 p = lookup_neigh_params(tbl, ifindex);
1840 if (p == NULL) {
1841 err = -ENOENT;
1842 goto errout_tbl_lock;
1843 }
1844
1845 for (i = 1; i <= NDTPA_MAX; i++) {
1846 if (tbp[i] == NULL)
1847 continue;
1848
1849 switch (i) {
1850 case NDTPA_QUEUE_LEN:
1851 p->queue_len = nla_get_u32(tbp[i]);
1852 break;
1853 case NDTPA_PROXY_QLEN:
1854 p->proxy_qlen = nla_get_u32(tbp[i]);
1855 break;
1856 case NDTPA_APP_PROBES:
1857 p->app_probes = nla_get_u32(tbp[i]);
1858 break;
1859 case NDTPA_UCAST_PROBES:
1860 p->ucast_probes = nla_get_u32(tbp[i]);
1861 break;
1862 case NDTPA_MCAST_PROBES:
1863 p->mcast_probes = nla_get_u32(tbp[i]);
1864 break;
1865 case NDTPA_BASE_REACHABLE_TIME:
1866 p->base_reachable_time = nla_get_msecs(tbp[i]);
1867 break;
1868 case NDTPA_GC_STALETIME:
1869 p->gc_staletime = nla_get_msecs(tbp[i]);
1870 break;
1871 case NDTPA_DELAY_PROBE_TIME:
1872 p->delay_probe_time = nla_get_msecs(tbp[i]);
1873 break;
1874 case NDTPA_RETRANS_TIME:
1875 p->retrans_time = nla_get_msecs(tbp[i]);
1876 break;
1877 case NDTPA_ANYCAST_DELAY:
1878 p->anycast_delay = nla_get_msecs(tbp[i]);
1879 break;
1880 case NDTPA_PROXY_DELAY:
1881 p->proxy_delay = nla_get_msecs(tbp[i]);
1882 break;
1883 case NDTPA_LOCKTIME:
1884 p->locktime = nla_get_msecs(tbp[i]);
1885 break;
1886 }
1887 }
1888 }
1889
1890 if (tb[NDTA_THRESH1])
1891 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1892
1893 if (tb[NDTA_THRESH2])
1894 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1895
1896 if (tb[NDTA_THRESH3])
1897 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1898
1899 if (tb[NDTA_GC_INTERVAL])
1900 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1901
1902 err = 0;
1903
1904 errout_tbl_lock:
1905 write_unlock_bh(&tbl->lock);
1906 errout_locked:
1907 read_unlock(&neigh_tbl_lock);
1908 errout:
1909 return err;
1910 }
1911
1912 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1913 {
1914 int family, tidx, nidx = 0;
1915 int tbl_skip = cb->args[0];
1916 int neigh_skip = cb->args[1];
1917 struct neigh_table *tbl;
1918
1919 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1920
1921 read_lock(&neigh_tbl_lock);
1922 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1923 struct neigh_parms *p;
1924
1925 if (tidx < tbl_skip || (family && tbl->family != family))
1926 continue;
1927
1928 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1929 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1930 NLM_F_MULTI) <= 0)
1931 break;
1932
1933 for (nidx = 0, p = tbl->parms.next; p; p = p->next, nidx++) {
1934 if (nidx < neigh_skip)
1935 continue;
1936
1937 if (neightbl_fill_param_info(skb, tbl, p,
1938 NETLINK_CB(cb->skb).pid,
1939 cb->nlh->nlmsg_seq,
1940 RTM_NEWNEIGHTBL,
1941 NLM_F_MULTI) <= 0)
1942 goto out;
1943 }
1944
1945 neigh_skip = 0;
1946 }
1947 out:
1948 read_unlock(&neigh_tbl_lock);
1949 cb->args[0] = tidx;
1950 cb->args[1] = nidx;
1951
1952 return skb->len;
1953 }
1954
1955 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
1956 u32 pid, u32 seq, int type, unsigned int flags)
1957 {
1958 unsigned long now = jiffies;
1959 struct nda_cacheinfo ci;
1960 struct nlmsghdr *nlh;
1961 struct ndmsg *ndm;
1962
1963 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
1964 if (nlh == NULL)
1965 return -EMSGSIZE;
1966
1967 ndm = nlmsg_data(nlh);
1968 ndm->ndm_family = neigh->ops->family;
1969 ndm->ndm_pad1 = 0;
1970 ndm->ndm_pad2 = 0;
1971 ndm->ndm_flags = neigh->flags;
1972 ndm->ndm_type = neigh->type;
1973 ndm->ndm_ifindex = neigh->dev->ifindex;
1974
1975 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
1976
1977 read_lock_bh(&neigh->lock);
1978 ndm->ndm_state = neigh->nud_state;
1979 if ((neigh->nud_state & NUD_VALID) &&
1980 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
1981 read_unlock_bh(&neigh->lock);
1982 goto nla_put_failure;
1983 }
1984
1985 ci.ndm_used = now - neigh->used;
1986 ci.ndm_confirmed = now - neigh->confirmed;
1987 ci.ndm_updated = now - neigh->updated;
1988 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
1989 read_unlock_bh(&neigh->lock);
1990
1991 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
1992 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
1993
1994 return nlmsg_end(skb, nlh);
1995
1996 nla_put_failure:
1997 nlmsg_cancel(skb, nlh);
1998 return -EMSGSIZE;
1999 }
2000
2001 static void neigh_update_notify(struct neighbour *neigh)
2002 {
2003 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2004 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2005 }
2006
2007 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2008 struct netlink_callback *cb)
2009 {
2010 struct neighbour *n;
2011 int rc, h, s_h = cb->args[1];
2012 int idx, s_idx = idx = cb->args[2];
2013
2014 read_lock_bh(&tbl->lock);
2015 for (h = 0; h <= tbl->hash_mask; h++) {
2016 if (h < s_h)
2017 continue;
2018 if (h > s_h)
2019 s_idx = 0;
2020 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next, idx++) {
2021 if (idx < s_idx)
2022 continue;
2023 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2024 cb->nlh->nlmsg_seq,
2025 RTM_NEWNEIGH,
2026 NLM_F_MULTI) <= 0) {
2027 read_unlock_bh(&tbl->lock);
2028 rc = -1;
2029 goto out;
2030 }
2031 }
2032 }
2033 read_unlock_bh(&tbl->lock);
2034 rc = skb->len;
2035 out:
2036 cb->args[1] = h;
2037 cb->args[2] = idx;
2038 return rc;
2039 }
2040
2041 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2042 {
2043 struct neigh_table *tbl;
2044 int t, family, s_t;
2045
2046 read_lock(&neigh_tbl_lock);
2047 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2048 s_t = cb->args[0];
2049
2050 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2051 if (t < s_t || (family && tbl->family != family))
2052 continue;
2053 if (t > s_t)
2054 memset(&cb->args[1], 0, sizeof(cb->args) -
2055 sizeof(cb->args[0]));
2056 if (neigh_dump_table(tbl, skb, cb) < 0)
2057 break;
2058 }
2059 read_unlock(&neigh_tbl_lock);
2060
2061 cb->args[0] = t;
2062 return skb->len;
2063 }
2064
2065 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2066 {
2067 int chain;
2068
2069 read_lock_bh(&tbl->lock);
2070 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2071 struct neighbour *n;
2072
2073 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2074 cb(n, cookie);
2075 }
2076 read_unlock_bh(&tbl->lock);
2077 }
2078 EXPORT_SYMBOL(neigh_for_each);
2079
2080 /* The tbl->lock must be held as a writer and BH disabled. */
2081 void __neigh_for_each_release(struct neigh_table *tbl,
2082 int (*cb)(struct neighbour *))
2083 {
2084 int chain;
2085
2086 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2087 struct neighbour *n, **np;
2088
2089 np = &tbl->hash_buckets[chain];
2090 while ((n = *np) != NULL) {
2091 int release;
2092
2093 write_lock(&n->lock);
2094 release = cb(n);
2095 if (release) {
2096 *np = n->next;
2097 n->dead = 1;
2098 } else
2099 np = &n->next;
2100 write_unlock(&n->lock);
2101 if (release)
2102 neigh_cleanup_and_release(n);
2103 }
2104 }
2105 }
2106 EXPORT_SYMBOL(__neigh_for_each_release);
2107
2108 #ifdef CONFIG_PROC_FS
2109
2110 static struct neighbour *neigh_get_first(struct seq_file *seq)
2111 {
2112 struct neigh_seq_state *state = seq->private;
2113 struct neigh_table *tbl = state->tbl;
2114 struct neighbour *n = NULL;
2115 int bucket = state->bucket;
2116
2117 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2118 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2119 n = tbl->hash_buckets[bucket];
2120
2121 while (n) {
2122 if (state->neigh_sub_iter) {
2123 loff_t fakep = 0;
2124 void *v;
2125
2126 v = state->neigh_sub_iter(state, n, &fakep);
2127 if (!v)
2128 goto next;
2129 }
2130 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2131 break;
2132 if (n->nud_state & ~NUD_NOARP)
2133 break;
2134 next:
2135 n = n->next;
2136 }
2137
2138 if (n)
2139 break;
2140 }
2141 state->bucket = bucket;
2142
2143 return n;
2144 }
2145
2146 static struct neighbour *neigh_get_next(struct seq_file *seq,
2147 struct neighbour *n,
2148 loff_t *pos)
2149 {
2150 struct neigh_seq_state *state = seq->private;
2151 struct neigh_table *tbl = state->tbl;
2152
2153 if (state->neigh_sub_iter) {
2154 void *v = state->neigh_sub_iter(state, n, pos);
2155 if (v)
2156 return n;
2157 }
2158 n = n->next;
2159
2160 while (1) {
2161 while (n) {
2162 if (state->neigh_sub_iter) {
2163 void *v = state->neigh_sub_iter(state, n, pos);
2164 if (v)
2165 return n;
2166 goto next;
2167 }
2168 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2169 break;
2170
2171 if (n->nud_state & ~NUD_NOARP)
2172 break;
2173 next:
2174 n = n->next;
2175 }
2176
2177 if (n)
2178 break;
2179
2180 if (++state->bucket > tbl->hash_mask)
2181 break;
2182
2183 n = tbl->hash_buckets[state->bucket];
2184 }
2185
2186 if (n && pos)
2187 --(*pos);
2188 return n;
2189 }
2190
2191 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2192 {
2193 struct neighbour *n = neigh_get_first(seq);
2194
2195 if (n) {
2196 while (*pos) {
2197 n = neigh_get_next(seq, n, pos);
2198 if (!n)
2199 break;
2200 }
2201 }
2202 return *pos ? NULL : n;
2203 }
2204
2205 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2206 {
2207 struct neigh_seq_state *state = seq->private;
2208 struct neigh_table *tbl = state->tbl;
2209 struct pneigh_entry *pn = NULL;
2210 int bucket = state->bucket;
2211
2212 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2213 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2214 pn = tbl->phash_buckets[bucket];
2215 if (pn)
2216 break;
2217 }
2218 state->bucket = bucket;
2219
2220 return pn;
2221 }
2222
2223 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2224 struct pneigh_entry *pn,
2225 loff_t *pos)
2226 {
2227 struct neigh_seq_state *state = seq->private;
2228 struct neigh_table *tbl = state->tbl;
2229
2230 pn = pn->next;
2231 while (!pn) {
2232 if (++state->bucket > PNEIGH_HASHMASK)
2233 break;
2234 pn = tbl->phash_buckets[state->bucket];
2235 if (pn)
2236 break;
2237 }
2238
2239 if (pn && pos)
2240 --(*pos);
2241
2242 return pn;
2243 }
2244
2245 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2246 {
2247 struct pneigh_entry *pn = pneigh_get_first(seq);
2248
2249 if (pn) {
2250 while (*pos) {
2251 pn = pneigh_get_next(seq, pn, pos);
2252 if (!pn)
2253 break;
2254 }
2255 }
2256 return *pos ? NULL : pn;
2257 }
2258
2259 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2260 {
2261 struct neigh_seq_state *state = seq->private;
2262 void *rc;
2263
2264 rc = neigh_get_idx(seq, pos);
2265 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2266 rc = pneigh_get_idx(seq, pos);
2267
2268 return rc;
2269 }
2270
2271 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2272 {
2273 struct neigh_seq_state *state = seq->private;
2274 loff_t pos_minus_one;
2275
2276 state->tbl = tbl;
2277 state->bucket = 0;
2278 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2279
2280 read_lock_bh(&tbl->lock);
2281
2282 pos_minus_one = *pos - 1;
2283 return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
2284 }
2285 EXPORT_SYMBOL(neigh_seq_start);
2286
2287 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2288 {
2289 struct neigh_seq_state *state;
2290 void *rc;
2291
2292 if (v == SEQ_START_TOKEN) {
2293 rc = neigh_get_idx(seq, pos);
2294 goto out;
2295 }
2296
2297 state = seq->private;
2298 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2299 rc = neigh_get_next(seq, v, NULL);
2300 if (rc)
2301 goto out;
2302 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2303 rc = pneigh_get_first(seq);
2304 } else {
2305 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2306 rc = pneigh_get_next(seq, v, NULL);
2307 }
2308 out:
2309 ++(*pos);
2310 return rc;
2311 }
2312 EXPORT_SYMBOL(neigh_seq_next);
2313
2314 void neigh_seq_stop(struct seq_file *seq, void *v)
2315 {
2316 struct neigh_seq_state *state = seq->private;
2317 struct neigh_table *tbl = state->tbl;
2318
2319 read_unlock_bh(&tbl->lock);
2320 }
2321 EXPORT_SYMBOL(neigh_seq_stop);
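/*
 * Illustration only (not part of neighbour.c): a minimal sketch of how a
 * protocol wires these helpers into its own seq_file, roughly the way
 * net/ipv4/arp.c does for /proc/net/arp.  my_tbl, my_seq_show and the
 * other "my_*" names are hypothetical; the open routine must hand a
 * zeroed struct neigh_seq_state to seq->private before the first
 * ->start() call.
 */
#if 0
static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
        return neigh_seq_start(seq, pos, &my_tbl, NEIGH_SEQ_SKIP_NOARP);
}

static const struct seq_operations my_seq_ops = {
        .start = my_seq_start,
        .next  = neigh_seq_next,
        .stop  = neigh_seq_stop,
        .show  = my_seq_show,   /* formats one struct neighbour per line */
};
#endif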
2322
2323 /* statistics via seq_file */
2324
2325 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2326 {
2327 struct proc_dir_entry *pde = seq->private;
2328 struct neigh_table *tbl = pde->data;
2329 int cpu;
2330
2331 if (*pos == 0)
2332 return SEQ_START_TOKEN;
2333
2334 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2335 if (!cpu_possible(cpu))
2336 continue;
2337 *pos = cpu+1;
2338 return per_cpu_ptr(tbl->stats, cpu);
2339 }
2340 return NULL;
2341 }
2342
2343 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2344 {
2345 struct proc_dir_entry *pde = seq->private;
2346 struct neigh_table *tbl = pde->data;
2347 int cpu;
2348
2349 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2350 if (!cpu_possible(cpu))
2351 continue;
2352 *pos = cpu+1;
2353 return per_cpu_ptr(tbl->stats, cpu);
2354 }
2355 return NULL;
2356 }
2357
2358 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2359 {
2360
2361 }
2362
2363 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2364 {
2365 struct proc_dir_entry *pde = seq->private;
2366 struct neigh_table *tbl = pde->data;
2367 struct neigh_statistics *st = v;
2368
2369 if (v == SEQ_START_TOKEN) {
2370 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs\n");
2371 return 0;
2372 }
2373
2374 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2375 "%08lx %08lx %08lx %08lx\n",
2376 atomic_read(&tbl->entries),
2377
2378 st->allocs,
2379 st->destroys,
2380 st->hash_grows,
2381
2382 st->lookups,
2383 st->hits,
2384
2385 st->res_failed,
2386
2387 st->rcv_probes_mcast,
2388 st->rcv_probes_ucast,
2389
2390 st->periodic_gc_runs,
2391 st->forced_gc_runs
2392 );
2393
2394 return 0;
2395 }
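/*
 * The resulting file (e.g. /proc/net/stat/arp_cache for the ARP table)
 * prints the header line above followed by one row of hexadecimal
 * counters per possible CPU; the first column repeats the table-wide
 * entry count on every row.
 */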
2396
2397 static const struct seq_operations neigh_stat_seq_ops = {
2398 .start = neigh_stat_seq_start,
2399 .next = neigh_stat_seq_next,
2400 .stop = neigh_stat_seq_stop,
2401 .show = neigh_stat_seq_show,
2402 };
2403
2404 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2405 {
2406 int ret = seq_open(file, &neigh_stat_seq_ops);
2407
2408 if (!ret) {
2409 struct seq_file *sf = file->private_data;
2410 sf->private = PDE(inode);
2411 }
2412 return ret;
2413 }
2414
2415 static const struct file_operations neigh_stat_seq_fops = {
2416 .owner = THIS_MODULE,
2417 .open = neigh_stat_seq_open,
2418 .read = seq_read,
2419 .llseek = seq_lseek,
2420 .release = seq_release,
2421 };
2422
2423 #endif /* CONFIG_PROC_FS */
2424
2425 static inline size_t neigh_nlmsg_size(void)
2426 {
2427 return NLMSG_ALIGN(sizeof(struct ndmsg))
2428 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2429 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2430 + nla_total_size(sizeof(struct nda_cacheinfo))
2431 + nla_total_size(4); /* NDA_PROBES */
2432 }
2433
2434 static void __neigh_notify(struct neighbour *n, int type, int flags)
2435 {
2436 struct sk_buff *skb;
2437 int err = -ENOBUFS;
2438
2439 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2440 if (skb == NULL)
2441 goto errout;
2442
2443 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2444 if (err < 0) {
2445 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2446 WARN_ON(err == -EMSGSIZE);
2447 kfree_skb(skb);
2448 goto errout;
2449 }
2450 err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2451 errout:
2452 if (err < 0)
2453 rtnl_set_sk_err(RTNLGRP_NEIGH, err);
2454 }
2455
2456 #ifdef CONFIG_ARPD
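/*
 * With CONFIG_ARPD, neigh_app_ns() multicasts an RTM_GETNEIGH request to
 * the RTNLGRP_NEIGH group so that a userspace daemon can take part in
 * resolution (the "app_solicit" probes configured below).
 */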
2457 void neigh_app_ns(struct neighbour *n)
2458 {
2459 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2460 }
2461 #endif /* CONFIG_ARPD */
2462
2463 #ifdef CONFIG_SYSCTL
2464
2465 static struct neigh_sysctl_table {
2466 struct ctl_table_header *sysctl_header;
2467 ctl_table neigh_vars[__NET_NEIGH_MAX];
2468 ctl_table neigh_dev[2];
2469 ctl_table neigh_neigh_dir[2];
2470 ctl_table neigh_proto_dir[2];
2471 ctl_table neigh_root_dir[2];
2472 } neigh_sysctl_template __read_mostly = {
2473 .neigh_vars = {
2474 {
2475 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2476 .procname = "mcast_solicit",
2477 .maxlen = sizeof(int),
2478 .mode = 0644,
2479 .proc_handler = &proc_dointvec,
2480 },
2481 {
2482 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2483 .procname = "ucast_solicit",
2484 .maxlen = sizeof(int),
2485 .mode = 0644,
2486 .proc_handler = &proc_dointvec,
2487 },
2488 {
2489 .ctl_name = NET_NEIGH_APP_SOLICIT,
2490 .procname = "app_solicit",
2491 .maxlen = sizeof(int),
2492 .mode = 0644,
2493 .proc_handler = &proc_dointvec,
2494 },
2495 {
2496 .ctl_name = NET_NEIGH_RETRANS_TIME,
2497 .procname = "retrans_time",
2498 .maxlen = sizeof(int),
2499 .mode = 0644,
2500 .proc_handler = &proc_dointvec_userhz_jiffies,
2501 },
2502 {
2503 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2504 .procname = "base_reachable_time",
2505 .maxlen = sizeof(int),
2506 .mode = 0644,
2507 .proc_handler = &proc_dointvec_jiffies,
2508 .strategy = &sysctl_jiffies,
2509 },
2510 {
2511 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2512 .procname = "delay_first_probe_time",
2513 .maxlen = sizeof(int),
2514 .mode = 0644,
2515 .proc_handler = &proc_dointvec_jiffies,
2516 .strategy = &sysctl_jiffies,
2517 },
2518 {
2519 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2520 .procname = "gc_stale_time",
2521 .maxlen = sizeof(int),
2522 .mode = 0644,
2523 .proc_handler = &proc_dointvec_jiffies,
2524 .strategy = &sysctl_jiffies,
2525 },
2526 {
2527 .ctl_name = NET_NEIGH_UNRES_QLEN,
2528 .procname = "unres_qlen",
2529 .maxlen = sizeof(int),
2530 .mode = 0644,
2531 .proc_handler = &proc_dointvec,
2532 },
2533 {
2534 .ctl_name = NET_NEIGH_PROXY_QLEN,
2535 .procname = "proxy_qlen",
2536 .maxlen = sizeof(int),
2537 .mode = 0644,
2538 .proc_handler = &proc_dointvec,
2539 },
2540 {
2541 .ctl_name = NET_NEIGH_ANYCAST_DELAY,
2542 .procname = "anycast_delay",
2543 .maxlen = sizeof(int),
2544 .mode = 0644,
2545 .proc_handler = &proc_dointvec_userhz_jiffies,
2546 },
2547 {
2548 .ctl_name = NET_NEIGH_PROXY_DELAY,
2549 .procname = "proxy_delay",
2550 .maxlen = sizeof(int),
2551 .mode = 0644,
2552 .proc_handler = &proc_dointvec_userhz_jiffies,
2553 },
2554 {
2555 .ctl_name = NET_NEIGH_LOCKTIME,
2556 .procname = "locktime",
2557 .maxlen = sizeof(int),
2558 .mode = 0644,
2559 .proc_handler = &proc_dointvec_userhz_jiffies,
2560 },
2561 {
2562 .ctl_name = NET_NEIGH_GC_INTERVAL,
2563 .procname = "gc_interval",
2564 .maxlen = sizeof(int),
2565 .mode = 0644,
2566 .proc_handler = &proc_dointvec_jiffies,
2567 .strategy = &sysctl_jiffies,
2568 },
2569 {
2570 .ctl_name = NET_NEIGH_GC_THRESH1,
2571 .procname = "gc_thresh1",
2572 .maxlen = sizeof(int),
2573 .mode = 0644,
2574 .proc_handler = &proc_dointvec,
2575 },
2576 {
2577 .ctl_name = NET_NEIGH_GC_THRESH2,
2578 .procname = "gc_thresh2",
2579 .maxlen = sizeof(int),
2580 .mode = 0644,
2581 .proc_handler = &proc_dointvec,
2582 },
2583 {
2584 .ctl_name = NET_NEIGH_GC_THRESH3,
2585 .procname = "gc_thresh3",
2586 .maxlen = sizeof(int),
2587 .mode = 0644,
2588 .proc_handler = &proc_dointvec,
2589 },
2590 {
2591 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2592 .procname = "retrans_time_ms",
2593 .maxlen = sizeof(int),
2594 .mode = 0644,
2595 .proc_handler = &proc_dointvec_ms_jiffies,
2596 .strategy = &sysctl_ms_jiffies,
2597 },
2598 {
2599 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2600 .procname = "base_reachable_time_ms",
2601 .maxlen = sizeof(int),
2602 .mode = 0644,
2603 .proc_handler = &proc_dointvec_ms_jiffies,
2604 .strategy = &sysctl_ms_jiffies,
2605 },
2606 },
2607 .neigh_dev = {
2608 {
2609 .ctl_name = NET_PROTO_CONF_DEFAULT,
2610 .procname = "default",
2611 .mode = 0555,
2612 },
2613 },
2614 .neigh_neigh_dir = {
2615 {
2616 .procname = "neigh",
2617 .mode = 0555,
2618 },
2619 },
2620 .neigh_proto_dir = {
2621 {
2622 .mode = 0555,
2623 },
2624 },
2625 .neigh_root_dir = {
2626 {
2627 .ctl_name = CTL_NET,
2628 .procname = "net",
2629 .mode = 0555,
2630 },
2631 },
2632 };
2633
2634 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2635 int p_id, int pdev_id, char *p_name,
2636 proc_handler *handler, ctl_handler *strategy)
2637 {
2638 struct neigh_sysctl_table *t = kmemdup(&neigh_sysctl_template,
2639 sizeof(*t), GFP_KERNEL);
2640 const char *dev_name_source = NULL;
2641 char *dev_name = NULL;
2642 int err = 0;
2643
2644 if (!t)
2645 return -ENOBUFS;
2646 t->neigh_vars[0].data = &p->mcast_probes;
2647 t->neigh_vars[1].data = &p->ucast_probes;
2648 t->neigh_vars[2].data = &p->app_probes;
2649 t->neigh_vars[3].data = &p->retrans_time;
2650 t->neigh_vars[4].data = &p->base_reachable_time;
2651 t->neigh_vars[5].data = &p->delay_probe_time;
2652 t->neigh_vars[6].data = &p->gc_staletime;
2653 t->neigh_vars[7].data = &p->queue_len;
2654 t->neigh_vars[8].data = &p->proxy_qlen;
2655 t->neigh_vars[9].data = &p->anycast_delay;
2656 t->neigh_vars[10].data = &p->proxy_delay;
2657 t->neigh_vars[11].data = &p->locktime;
2658
2659 if (dev) {
2660 dev_name_source = dev->name;
2661 t->neigh_dev[0].ctl_name = dev->ifindex;
2662 t->neigh_vars[12].procname = NULL;
2663 t->neigh_vars[13].procname = NULL;
2664 t->neigh_vars[14].procname = NULL;
2665 t->neigh_vars[15].procname = NULL;
2666 } else {
2667 dev_name_source = t->neigh_dev[0].procname;
2668 t->neigh_vars[12].data = (int *)(p + 1);
2669 t->neigh_vars[13].data = (int *)(p + 1) + 1;
2670 t->neigh_vars[14].data = (int *)(p + 1) + 2;
2671 t->neigh_vars[15].data = (int *)(p + 1) + 3;
2672 }
2673
2674 t->neigh_vars[16].data = &p->retrans_time;
2675 t->neigh_vars[17].data = &p->base_reachable_time;
2676
2677 if (handler || strategy) {
2678 /* RetransTime */
2679 t->neigh_vars[3].proc_handler = handler;
2680 t->neigh_vars[3].strategy = strategy;
2681 t->neigh_vars[3].extra1 = dev;
2682 /* ReachableTime */
2683 t->neigh_vars[4].proc_handler = handler;
2684 t->neigh_vars[4].strategy = strategy;
2685 t->neigh_vars[4].extra1 = dev;
2686 /* RetransTime (in milliseconds) */
2687 t->neigh_vars[16].proc_handler = handler;
2688 t->neigh_vars[16].strategy = strategy;
2689 t->neigh_vars[16].extra1 = dev;
2690 /* ReachableTime (in milliseconds) */
2691 t->neigh_vars[17].proc_handler = handler;
2692 t->neigh_vars[17].strategy = strategy;
2693 t->neigh_vars[17].extra1 = dev;
2694 }
2695
2696 dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2697 if (!dev_name) {
2698 err = -ENOBUFS;
2699 goto free;
2700 }
2701
2702 t->neigh_dev[0].procname = dev_name;
2703
2704 t->neigh_neigh_dir[0].ctl_name = pdev_id;
2705
2706 t->neigh_proto_dir[0].procname = p_name;
2707 t->neigh_proto_dir[0].ctl_name = p_id;
2708
2709 t->neigh_dev[0].child = t->neigh_vars;
2710 t->neigh_neigh_dir[0].child = t->neigh_dev;
2711 t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
2712 t->neigh_root_dir[0].child = t->neigh_proto_dir;
2713
2714 t->sysctl_header = register_sysctl_table(t->neigh_root_dir);
2715 if (!t->sysctl_header) {
2716 err = -ENOBUFS;
2717 goto free_procname;
2718 }
2719 p->sysctl_table = t;
2720 return 0;
2721
2722 /* error path */
2723 free_procname:
2724 kfree(dev_name);
2725 free:
2726 kfree(t);
2727
2728 return err;
2729 }
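/*
 * Illustration only (not part of neighbour.c): registering the sysctl
 * tree for a table's default parameters, roughly the call arp_init() in
 * net/ipv4/arp.c makes.  Passing a NULL handler and strategy keeps the
 * stock jiffies-conversion handlers set up in the template above.
 */
#if 0
        neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4, NET_IPV4_NEIGH,
                              "ipv4", NULL, NULL);
#endif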
2730
2731 void neigh_sysctl_unregister(struct neigh_parms *p)
2732 {
2733 if (p->sysctl_table) {
2734 struct neigh_sysctl_table *t = p->sysctl_table;
2735 p->sysctl_table = NULL;
2736 unregister_sysctl_table(t->sysctl_header);
2737 kfree(t->neigh_dev[0].procname);
2738 kfree(t);
2739 }
2740 }
2741
2742 #endif /* CONFIG_SYSCTL */
2743
2744 static int __init neigh_init(void)
2745 {
2746 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2747 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2748 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2749
2750 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2751 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2752
2753 return 0;
2754 }
2755
2756 subsys_initcall(neigh_init);
2757
2758 EXPORT_SYMBOL(__neigh_event_send);
2759 EXPORT_SYMBOL(neigh_changeaddr);
2760 EXPORT_SYMBOL(neigh_compat_output);
2761 EXPORT_SYMBOL(neigh_connected_output);
2762 EXPORT_SYMBOL(neigh_create);
2763 EXPORT_SYMBOL(neigh_destroy);
2764 EXPORT_SYMBOL(neigh_event_ns);
2765 EXPORT_SYMBOL(neigh_ifdown);
2766 EXPORT_SYMBOL(neigh_lookup);
2767 EXPORT_SYMBOL(neigh_lookup_nodev);
2768 EXPORT_SYMBOL(neigh_parms_alloc);
2769 EXPORT_SYMBOL(neigh_parms_release);
2770 EXPORT_SYMBOL(neigh_rand_reach_time);
2771 EXPORT_SYMBOL(neigh_resolve_output);
2772 EXPORT_SYMBOL(neigh_table_clear);
2773 EXPORT_SYMBOL(neigh_table_init);
2774 EXPORT_SYMBOL(neigh_table_init_no_netlink);
2775 EXPORT_SYMBOL(neigh_update);
2776 EXPORT_SYMBOL(pneigh_enqueue);
2777 EXPORT_SYMBOL(pneigh_lookup);
2778
2779 #ifdef CONFIG_ARPD
2780 EXPORT_SYMBOL(neigh_app_ns);
2781 #endif
2782 #ifdef CONFIG_SYSCTL
2783 EXPORT_SYMBOL(neigh_sysctl_register);
2784 EXPORT_SYMBOL(neigh_sysctl_unregister);
2785 #endif