// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Generic address resolution entity
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	Fixes:
 *	Vitaly E. Lavrov	releasing NULL neighbor in neigh_add.
 *	Harald Welte		Add neighbour cache statistics like rtstat
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>

#include <trace/events/neigh.h>
#define NEIGH_DEBUG 1

#define neigh_dbg(level, fmt, ...)		\
do {						\
	if (level <= NEIGH_DEBUG)		\
		pr_debug(fmt, ##__VA_ARGS__);	\
} while (0)

#define PNEIGH_HASHMASK		0xF
static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev);

#ifdef CONFIG_PROC_FS
static const struct seq_operations neigh_stat_seq_ops;
#endif
/*
   Neighbour hash table buckets are protected with rwlock tbl->lock.

   - All the scans/updates to hash buckets MUST be made under this lock.
   - NOTHING clever should be made under this lock: no callbacks
     to protocol backends, no attempts to send something to network.
     It will result in deadlocks, if backend/driver wants to use neighbour
     cache.
   - If the entry requires some non-trivial actions, increase
     its reference count and release table lock.

   Neighbour entries are protected:
   - with reference count.
   - with rwlock neigh->lock

   Reference count prevents destruction.

   neigh->lock mainly serializes ll address data and its validity state.
   However, the same lock is used to protect another entry fields:
    - timer
    - resolution queue

   Again, nothing clever shall be made under neigh->lock,
   the most complicated procedure, which we allow is dev->hard_header.
   It is supposed, that dev->hard_header is simplistic and does
   not make callbacks to neighbour tables.
 */
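
/* Illustrative sketch (not part of the original file): the "take a
 * reference, drop tbl->lock, then act" pattern the rules above require.
 * The locking helpers are real; the surrounding function is hypothetical.
 *
 *	static void example_act_on_entry(struct neigh_table *tbl,
 *					 struct neighbour *n)
 *	{
 *		neigh_hold(n);			// pin the entry first
 *		write_unlock_bh(&tbl->lock);	// never call out under tbl->lock
 *		// ... non-trivial work: notify, transmit, etc. ...
 *		write_lock_bh(&tbl->lock);
 *		neigh_release(n);
 *	}
 */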
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
	kfree_skb(skb);
	return -ENETDOWN;
}
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
	trace_neigh_cleanup_and_release(neigh, 0);
	__neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	neigh_release(neigh);
}
/*
 * It is random distribution in the interval (1/2)*base...(3/2)*base.
 * It corresponds to default IPv6 settings and is not overridable,
 * because it is really reasonable choice.
 */

unsigned long neigh_rand_reach_time(unsigned long base)
{
	return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
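
/* Worked example (illustrative, not part of the original file): with
 * base = 30 * HZ the result is (base >> 1) + (prandom_u32() % base),
 * i.e. uniform over [15 * HZ, 45 * HZ) -- exactly (1/2)*base..(3/2)*base.
 */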
static void neigh_mark_dead(struct neighbour *n)
{
	n->dead = 1;
	if (!list_empty(&n->gc_list)) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	}
}
static void neigh_update_gc_list(struct neighbour *n)
{
	bool on_gc_list, exempt_from_gc;

	write_lock_bh(&n->tbl->lock);
	write_lock(&n->lock);

	/* remove from the gc list if new state is permanent or if neighbor
	 * is externally learned; otherwise entry should be on the gc list
	 */
	exempt_from_gc = n->nud_state & NUD_PERMANENT ||
			 n->flags & NTF_EXT_LEARNED;
	on_gc_list = !list_empty(&n->gc_list);

	if (exempt_from_gc && on_gc_list) {
		list_del_init(&n->gc_list);
		atomic_dec(&n->tbl->gc_entries);
	} else if (!exempt_from_gc && !on_gc_list) {
		/* add entries to the tail; cleaning removes from the front */
		list_add_tail(&n->gc_list, &n->tbl->gc_list);
		atomic_inc(&n->tbl->gc_entries);
	}

	write_unlock(&n->lock);
	write_unlock_bh(&n->tbl->lock);
}
static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
				     int *notify)
{
	bool rc = false;
	u8 ndm_flags;

	if (!(flags & NEIGH_UPDATE_F_ADMIN))
		return rc;

	ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
	if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
		if (ndm_flags & NTF_EXT_LEARNED)
			neigh->flags |= NTF_EXT_LEARNED;
		else
			neigh->flags &= ~NTF_EXT_LEARNED;
		rc = true;
		*notify = 1;
	}

	return rc;
}
static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
		      struct neigh_table *tbl)
{
	bool retval = false;

	write_lock(&n->lock);
	if (refcount_read(&n->refcnt) == 1) {
		struct neighbour *neigh;

		neigh = rcu_dereference_protected(n->next,
						  lockdep_is_held(&tbl->lock));
		rcu_assign_pointer(*np, neigh);
		neigh_mark_dead(n);
		retval = true;
	}
	write_unlock(&n->lock);
	if (retval)
		neigh_cleanup_and_release(n);
	return retval;
}
bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
{
	struct neigh_hash_table *nht;
	void *pkey = ndel->primary_key;
	u32 hash_val;
	struct neighbour *n;
	struct neighbour __rcu **np;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
	hash_val = hash_val >> (32 - nht->hash_shift);

	np = &nht->hash_buckets[hash_val];
	while ((n = rcu_dereference_protected(*np,
					      lockdep_is_held(&tbl->lock)))) {
		if (n == ndel)
			return neigh_del(n, np, tbl);
		np = &n->next;
	}
	return false;
}
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
	unsigned long tref = jiffies - 5 * HZ;
	struct neighbour *n, *tmp;
	int shrunk = 0;

	NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);

	write_lock_bh(&tbl->lock);

	list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
		if (refcount_read(&n->refcnt) == 1) {
			bool remove = false;

			write_lock(&n->lock);
			if ((n->nud_state == NUD_FAILED) ||
			    time_after(tref, n->updated))
				remove = true;
			write_unlock(&n->lock);

			if (remove && neigh_remove_one(n, tbl))
				shrunk++;
			if (shrunk >= max_clean)
				break;
		}
	}

	tbl->last_flush = jiffies;

	write_unlock_bh(&tbl->lock);

	return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
	neigh_hold(n);
	if (unlikely(mod_timer(&n->timer, when))) {
		printk("NEIGH: BUG, double timer add, state is %x\n",
		       n->nud_state);
		dump_stack();
	}
}
static int neigh_del_timer(struct neighbour *n)
{
	if ((n->nud_state & NUD_IN_TIMER) &&
	    del_timer(&n->timer)) {
		neigh_release(n);
		return 1;
	}
	return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
			    bool skip_perm)
{
	int i;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	for (i = 0; i < (1 << nht->hash_shift); i++) {
		struct neighbour *n;
		struct neighbour __rcu **np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			if (skip_perm && n->nud_state & NUD_PERMANENT) {
				np = &n->next;
				continue;
			}
			rcu_assign_pointer(*np,
				   rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
			write_lock(&n->lock);
			neigh_del_timer(n);
			neigh_mark_dead(n);
			if (refcount_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				__skb_queue_purge(&n->arp_queue);
				n->arp_queue_len_bytes = 0;
				n->output = neigh_blackhole;
				if (n->nud_state & NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				neigh_dbg(2, "neigh %p is stray\n", n);
			}
			write_unlock(&n->lock);
			neigh_cleanup_and_release(n);
		}
	}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, false);
	write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
			  bool skip_perm)
{
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev, skip_perm);
	pneigh_ifdown_and_unlock(tbl, dev);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, true);
	return 0;
}
EXPORT_SYMBOL(neigh_carrier_down);

int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	__neigh_ifdown(tbl, dev, false);
	return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl,
				     struct net_device *dev,
				     bool exempt_from_gc)
{
	struct neighbour *n = NULL;
	unsigned long now = jiffies;
	int entries;

	if (exempt_from_gc)
		goto do_alloc;

	entries = atomic_inc_return(&tbl->gc_entries) - 1;
	if (entries >= tbl->gc_thresh3 ||
	    (entries >= tbl->gc_thresh2 &&
	     time_after(now, tbl->last_flush + 5 * HZ))) {
		if (!neigh_forced_gc(tbl) &&
		    entries >= tbl->gc_thresh3) {
			net_info_ratelimited("%s: neighbor table overflow!\n",
					     tbl->id);
			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
			goto out_entries;
		}
	}

do_alloc:
	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
	if (!n)
		goto out_entries;

	__skb_queue_head_init(&n->arp_queue);
	rwlock_init(&n->lock);
	seqlock_init(&n->ha_lock);
	n->updated	  = n->used = now;
	n->nud_state	  = NUD_NONE;
	n->output	  = neigh_blackhole;
	seqlock_init(&n->hh.hh_lock);
	n->parms	  = neigh_parms_clone(&tbl->parms);
	timer_setup(&n->timer, neigh_timer_handler, 0);

	NEIGH_CACHE_STAT_INC(tbl, allocs);
	n->tbl		  = tbl;
	refcount_set(&n->refcnt, 1);
	n->dead		  = 1;
	INIT_LIST_HEAD(&n->gc_list);

	atomic_inc(&tbl->entries);
out:
	return n;

out_entries:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	goto out;
}
static void neigh_get_hash_rnd(u32 *x)
{
	*x = get_random_u32() | 1;
}
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
	size_t size = (1 << shift) * sizeof(struct neighbour *);
	struct neigh_hash_table *ret;
	struct neighbour __rcu **buckets;
	int i;

	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
	if (!ret)
		return NULL;
	if (size <= PAGE_SIZE) {
		buckets = kzalloc(size, GFP_ATOMIC);
	} else {
		buckets = (struct neighbour __rcu **)
			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					   get_order(size));
		kmemleak_alloc(buckets, size, 1, GFP_ATOMIC);
	}
	if (!buckets) {
		kfree(ret);
		return NULL;
	}
	ret->hash_buckets = buckets;
	ret->hash_shift = shift;
	for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
		neigh_get_hash_rnd(&ret->hash_rnd[i]);
	return ret;
}
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct neigh_hash_table *nht = container_of(head,
						    struct neigh_hash_table,
						    rcu);
	size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
	struct neighbour __rcu **buckets = nht->hash_buckets;

	if (size <= PAGE_SIZE) {
		kfree(buckets);
	} else {
		kmemleak_free(buckets);
		free_pages((unsigned long)buckets, get_order(size));
	}
	kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
						unsigned long new_shift)
{
	unsigned int i, hash;
	struct neigh_hash_table *new_nht, *old_nht;

	NEIGH_CACHE_STAT_INC(tbl, hash_grows);

	old_nht = rcu_dereference_protected(tbl->nht,
					    lockdep_is_held(&tbl->lock));
	new_nht = neigh_hash_alloc(new_shift);
	if (!new_nht)
		return old_nht;

	for (i = 0; i < (1 << old_nht->hash_shift); i++) {
		struct neighbour *n, *next;

		for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
						   lockdep_is_held(&tbl->lock));
		     n != NULL;
		     n = next) {
			hash = tbl->hash(n->primary_key, n->dev,
					 new_nht->hash_rnd);

			hash >>= (32 - new_nht->hash_shift);
			next = rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock));

			rcu_assign_pointer(n->next,
					   rcu_dereference_protected(
						new_nht->hash_buckets[hash],
						lockdep_is_held(&tbl->lock)));
			rcu_assign_pointer(new_nht->hash_buckets[hash], n);
		}
	}

	rcu_assign_pointer(tbl->nht, new_nht);
	call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
	return new_nht;
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
			       struct net_device *dev)
{
	struct neighbour *n;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	n = __neigh_lookup_noref(tbl, pkey, dev);
	if (n) {
		if (!refcount_inc_not_zero(&n->refcnt))
			n = NULL;
		NEIGH_CACHE_STAT_INC(tbl, hits);
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
				     const void *pkey)
{
	struct neighbour *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, lookups);

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);
	hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);

	for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
	     n != NULL;
	     n = rcu_dereference_bh(n->next)) {
		if (!memcmp(n->primary_key, pkey, key_len) &&
		    net_eq(dev_net(n->dev), net)) {
			if (!refcount_inc_not_zero(&n->refcnt))
				n = NULL;
			NEIGH_CACHE_STAT_INC(tbl, hits);
			break;
		}
	}

	rcu_read_unlock_bh();
	return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
static struct neighbour *___neigh_create(struct neigh_table *tbl,
					 const void *pkey,
					 struct net_device *dev,
					 bool exempt_from_gc, bool want_ref)
{
	struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
	u32 hash_val;
	unsigned int key_len = tbl->key_len;
	int error;
	struct neigh_hash_table *nht;

	trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);

	if (!n) {
		rc = ERR_PTR(-ENOBUFS);
		goto out;
	}

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	if (dev->netdev_ops->ndo_neigh_construct) {
		error = dev->netdev_ops->ndo_neigh_construct(dev, n);
		if (error < 0) {
			rc = ERR_PTR(error);
			goto out_neigh_release;
		}
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		rc = ERR_PTR(error);
		goto out_neigh_release;
	}

	n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);

	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);

	if (n->parms->dead) {
		rc = ERR_PTR(-EINVAL);
		goto out_tbl_unlock;
	}

	for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
					    lockdep_is_held(&tbl->lock));
	     n1 != NULL;
	     n1 = rcu_dereference_protected(n1->next,
			lockdep_is_held(&tbl->lock))) {
		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
			if (want_ref)
				neigh_hold(n1);
			rc = n1;
			goto out_tbl_unlock;
		}
	}

	n->dead = 0;
	if (!exempt_from_gc)
		list_add_tail(&n->gc_list, &n->tbl->gc_list);

	if (want_ref)
		neigh_hold(n);
	rcu_assign_pointer(n->next,
			   rcu_dereference_protected(nht->hash_buckets[hash_val],
						     lockdep_is_held(&tbl->lock)));
	rcu_assign_pointer(nht->hash_buckets[hash_val], n);
	write_unlock_bh(&tbl->lock);
	neigh_dbg(2, "neigh %p is created\n", n);
	rc = n;
out:
	return rc;
out_tbl_unlock:
	write_unlock_bh(&tbl->lock);
out_neigh_release:
	if (!exempt_from_gc)
		atomic_dec(&tbl->gc_entries);
	neigh_release(n);
	goto out;
}

struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
				 struct net_device *dev, bool want_ref)
{
	return ___neigh_create(tbl, pkey, dev, false, want_ref);
}
EXPORT_SYMBOL(__neigh_create);
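
/* Illustrative usage (not part of the original file): how a protocol
 * resolves-or-creates an entry. arp_tbl is the real IPv4 table; the
 * surrounding code is a hypothetical sketch.
 *
 *	struct neighbour *n = __neigh_lookup_errno(&arp_tbl, &next_hop_ip, dev);
 *	// __neigh_lookup_errno() falls back to __neigh_create() on a cache
 *	// miss, returning a referenced entry or an ERR_PTR().
 *	if (!IS_ERR(n)) {
 *		// ... use the entry ...
 *		neigh_release(n);	// drop the reference taken above
 *	}
 */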
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
{
	u32 hash_val = *(u32 *)(pkey + key_len - 4);
	hash_val ^= (hash_val >> 16);
	hash_val ^= hash_val >> 8;
	hash_val ^= hash_val >> 4;
	hash_val &= PNEIGH_HASHMASK;
	return hash_val;
}
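
/* Worked example (illustrative, not part of the original file): the hash
 * reads the *last* 4 bytes of the key -- so it works for both IPv4's
 * 4-byte and IPv6's 16-byte keys -- folds that 32-bit word onto itself
 * with shifts of 16, 8 and 4, then masks with PNEIGH_HASHMASK to pick
 * one of 16 buckets.
 */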
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
					      struct net *net,
					      const void *pkey,
					      unsigned int key_len,
					      struct net_device *dev)
{
	while (n) {
		if (!memcmp(n->key, pkey, key_len) &&
		    net_eq(pneigh_net(n), net) &&
		    (n->dev == dev || !n->dev))
			return n;
		n = n->next;
	}
	return NULL;
}
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
				     struct net *net, const void *pkey, struct net_device *dev)
{
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
				 net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
				    struct net *net, const void *pkey,
				    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	read_lock_bh(&tbl->lock);
	n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
			      net, pkey, key_len, dev);
	read_unlock_bh(&tbl->lock);

	if (n || !creat)
		goto out;

	ASSERT_RTNL();

	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (!n)
		goto out;

	n->protocol = 0;
	write_pnet(&n->net, net);
	memcpy(n->key, pkey, key_len);
	n->dev = dev;
	if (dev)
		dev_hold(dev);

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		if (dev)
			dev_put(dev);
		kfree(n);
		n = NULL;
		goto out;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
out:
	return n;
}
EXPORT_SYMBOL(pneigh_lookup);
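
/* Illustrative note (not part of the original file): proxy entries
 * created here back "ip neigh add proxy <addr> dev <if>" -- the kernel
 * then answers ARP/ND requests for <addr> on behalf of another host.
 */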
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
		  struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	unsigned int key_len = tbl->key_len;
	u32 hash_val = pneigh_hash(pkey, key_len);

	write_lock_bh(&tbl->lock);
	for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
	     np = &n->next) {
		if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
		    net_eq(pneigh_net(n), net)) {
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			if (n->dev)
				dev_put(n->dev);
			kfree(n);
			return 0;
		}
	}
	write_unlock_bh(&tbl->lock);
	return -ENOENT;
}
static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
				    struct net_device *dev)
{
	struct pneigh_entry *n, **np, *freelist = NULL;
	u32 h;

	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
		np = &tbl->phash_buckets[h];
		while ((n = *np) != NULL) {
			if (!dev || n->dev == dev) {
				*np = n->next;
				n->next = freelist;
				freelist = n;
				continue;
			}
			np = &n->next;
		}
	}
	write_unlock_bh(&tbl->lock);
	while ((n = freelist)) {
		freelist = n->next;
		n->next = NULL;
		if (tbl->pdestructor)
			tbl->pdestructor(n);
		if (n->dev)
			dev_put(n->dev);
		kfree(n);
	}
	return -ENOENT;
}
static void neigh_parms_destroy(struct neigh_parms *parms);

static inline void neigh_parms_put(struct neigh_parms *parms)
{
	if (refcount_dec_and_test(&parms->refcnt))
		neigh_parms_destroy(parms);
}
/*
 *	neighbour must already be out of the table;
 *
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct net_device *dev = neigh->dev;

	NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);

	if (!neigh->dead) {
		pr_warn("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	if (neigh_del_timer(neigh))
		pr_warn("Impossible event\n");

	write_lock_bh(&neigh->lock);
	__skb_queue_purge(&neigh->arp_queue);
	write_unlock_bh(&neigh->lock);
	neigh->arp_queue_len_bytes = 0;

	if (dev->netdev_ops->ndo_neigh_destroy)
		dev->netdev_ops->ndo_neigh_destroy(dev, neigh);

	dev_put(dev);
	neigh_parms_put(neigh->parms);

	neigh_dbg(2, "neigh %p is destroyed\n", neigh);

	atomic_dec(&neigh->tbl->entries);
	kfree_rcu(neigh, rcu);
}
EXPORT_SYMBOL(neigh_destroy);
/* Neighbour state is suspicious;
   disable fast path.

   Called with write_locked neigh.
 */
static void neigh_suspect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is suspected\n", neigh);

	neigh->output = neigh->ops->output;
}

/* Neighbour state is OK;
   enable fast path.

   Called with write_locked neigh.
 */
static void neigh_connect(struct neighbour *neigh)
{
	neigh_dbg(2, "neigh %p is connected\n", neigh);

	neigh->output = neigh->ops->connected_output;
}
static void neigh_periodic_work(struct work_struct *work)
{
	struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
	struct neighbour *n;
	struct neighbour __rcu **np;
	unsigned int i;
	struct neigh_hash_table *nht;

	NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);

	write_lock_bh(&tbl->lock);
	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));

	/*
	 *	periodically recompute ReachableTime from random function
	 */

	if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
		struct neigh_parms *p;
		tbl->last_rand = jiffies;
		list_for_each_entry(p, &tbl->parms_list, list)
			p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}

	if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
		goto out;

	for (i = 0 ; i < (1 << nht->hash_shift); i++) {
		np = &nht->hash_buckets[i];

		while ((n = rcu_dereference_protected(*np,
				lockdep_is_held(&tbl->lock))) != NULL) {
			unsigned int state;

			write_lock(&n->lock);

			state = n->nud_state;
			if ((state & (NUD_PERMANENT | NUD_IN_TIMER)) ||
			    (n->flags & NTF_EXT_LEARNED)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			if (time_before(n->used, n->confirmed))
				n->used = n->confirmed;

			if (refcount_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED ||
			     time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
				*np = n->next;
				neigh_mark_dead(n);
				write_unlock(&n->lock);
				neigh_cleanup_and_release(n);
				continue;
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
		/*
		 * It's fine to release lock here, even if hash table
		 * grows while we are preempted.
		 */
		write_unlock_bh(&tbl->lock);
		cond_resched();
		write_lock_bh(&tbl->lock);
		nht = rcu_dereference_protected(tbl->nht,
						lockdep_is_held(&tbl->lock));
	}
out:
	/* Cycle through all hash buckets every BASE_REACHABLE_TIME/2 ticks.
	 * ARP entry timeouts range from 1/2 BASE_REACHABLE_TIME to 3/2
	 * BASE_REACHABLE_TIME.
	 */
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			   NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
	write_unlock_bh(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
{
	struct neigh_parms *p = n->parms;
	return NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
	       (n->nud_state & NUD_PROBE ? NEIGH_VAR(p, MCAST_REPROBES) :
	        NEIGH_VAR(p, MCAST_PROBES));
}
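
/* Worked example (illustrative, not part of the original file): with the
 * common defaults ucast_solicit = 3, app_solicit = 0, mcast_solicit = 3,
 * an INCOMPLETE entry gives up after 3 + 0 + 3 = 6 probes; in NUD_PROBE
 * state mcast_resolicit (default 0) is used instead of mcast_solicit.
 */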
static void neigh_invalidate(struct neighbour *neigh)
	__releases(neigh->lock)
	__acquires(neigh->lock)
{
	struct sk_buff *skb;

	NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
	neigh_dbg(2, "neigh %p is failed\n", neigh);
	neigh->updated = jiffies;

	/* It is very thin place. report_unreachable is very complicated
	   routine. Particularly, it can hit the same neighbour entry!

	   So that, we try to be accurate and avoid dead loop. --ANK
	 */
	while (neigh->nud_state == NUD_FAILED &&
	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
		write_unlock(&neigh->lock);
		neigh->ops->error_report(neigh, skb);
		write_lock(&neigh->lock);
	}
	__skb_queue_purge(&neigh->arp_queue);
	neigh->arp_queue_len_bytes = 0;
}
static void neigh_probe(struct neighbour *neigh)
	__releases(neigh->lock)
{
	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
	/* keep skb alive even if arp_queue overflows */
	if (skb)
		skb = skb_clone(skb, GFP_ATOMIC);
	write_unlock(&neigh->lock);
	if (neigh->ops->solicit)
		neigh->ops->solicit(neigh, skb);
	atomic_inc(&neigh->probes);
	consume_skb(skb);
}
/* Called when a timer expires for a neighbour entry. */

static void neigh_timer_handler(struct timer_list *t)
{
	unsigned long now, next;
	struct neighbour *neigh = from_timer(neigh, t, timer);
	unsigned int state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;
	now = jiffies;
	next = now + HZ;

	if (!(state & NUD_IN_TIMER))
		goto out;

	if (state & NUD_REACHABLE) {
		if (time_before_eq(now,
				   neigh->confirmed + neigh->parms->reachable_time)) {
			neigh_dbg(2, "neigh %p is still alive\n", neigh);
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else if (time_before_eq(now,
					  neigh->used +
					  NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is delayed\n", neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
		} else {
			neigh_dbg(2, "neigh %p is suspected\n", neigh);
			neigh->nud_state = NUD_STALE;
			neigh->updated = jiffies;
			neigh_suspect(neigh);
			notify = 1;
		}
	} else if (state & NUD_DELAY) {
		if (time_before_eq(now,
				   neigh->confirmed +
				   NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
			neigh_dbg(2, "neigh %p is now reachable\n", neigh);
			neigh->nud_state = NUD_REACHABLE;
			neigh->updated = jiffies;
			neigh_connect(neigh);
			notify = 1;
			next = neigh->confirmed + neigh->parms->reachable_time;
		} else {
			neigh_dbg(2, "neigh %p is probed\n", neigh);
			neigh->nud_state = NUD_PROBE;
			neigh->updated = jiffies;
			atomic_set(&neigh->probes, 0);
			notify = 1;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
		}
	} else {
		/* NUD_PROBE|NUD_INCOMPLETE */
		next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
	}

	if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
	    atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh_invalidate(neigh);
		goto out;
	}

	if (neigh->nud_state & NUD_IN_TIMER) {
		if (time_before(next, jiffies + HZ/100))
			next = jiffies + HZ/100;
		if (!mod_timer(&neigh->timer, next))
			neigh_hold(neigh);
	}
	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
		neigh_probe(neigh);
	} else {
out:
		write_unlock(&neigh->lock);
	}

	if (notify)
		neigh_update_notify(neigh, 0);

	trace_neigh_timer_handler(neigh, 0);

	neigh_release(neigh);
}
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc;
	bool immediate_probe = false;

	write_lock_bh(&neigh->lock);

	rc = 0;
	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
		goto out_unlock_bh;
	if (neigh->dead)
		goto out_dead;

	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
		    NEIGH_VAR(neigh->parms, APP_PROBES)) {
			unsigned long next, now = jiffies;

			atomic_set(&neigh->probes,
				   NEIGH_VAR(neigh->parms, UCAST_PROBES));
			neigh_del_timer(neigh);
			neigh->nud_state     = NUD_INCOMPLETE;
			neigh->updated = now;
			next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
					 HZ/100);
			neigh_add_timer(neigh, next);
			immediate_probe = true;
		} else {
			neigh->nud_state = NUD_FAILED;
			neigh->updated = jiffies;
			write_unlock_bh(&neigh->lock);

			kfree_skb(skb);
			return 1;
		}
	} else if (neigh->nud_state & NUD_STALE) {
		neigh_dbg(2, "neigh %p is delayed\n", neigh);
		neigh_del_timer(neigh);
		neigh->nud_state = NUD_DELAY;
		neigh->updated = jiffies;
		neigh_add_timer(neigh, jiffies +
				NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
	}

	if (neigh->nud_state == NUD_INCOMPLETE) {
		if (skb) {
			while (neigh->arp_queue_len_bytes + skb->truesize >
			       NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
				struct sk_buff *buff;

				buff = __skb_dequeue(&neigh->arp_queue);
				if (!buff)
					break;
				neigh->arp_queue_len_bytes -= buff->truesize;
				kfree_skb(buff);
				NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
			}
			skb_dst_force(skb);
			__skb_queue_tail(&neigh->arp_queue, skb);
			neigh->arp_queue_len_bytes += skb->truesize;
		}
		rc = 1;
	}
out_unlock_bh:
	if (immediate_probe)
		neigh_probe(neigh);
	else
		write_unlock(&neigh->lock);
	local_bh_enable();
	trace_neigh_event_send_done(neigh, rc);
	return rc;

out_dead:
	if (neigh->nud_state & NUD_STALE)
		goto out_unlock_bh;
	write_unlock_bh(&neigh->lock);
	kfree_skb(skb);
	trace_neigh_event_send_dead(neigh, 1);
	return 1;
}
EXPORT_SYMBOL(__neigh_event_send);
static void neigh_update_hhs(struct neighbour *neigh)
{
	struct hh_cache *hh;
	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
		= NULL;

	if (neigh->dev->header_ops)
		update = neigh->dev->header_ops->cache_update;

	if (update) {
		hh = &neigh->hh;
		if (READ_ONCE(hh->hh_len)) {
			write_seqlock_bh(&hh->hh_lock);
			update(hh, neigh->dev, neigh->ha);
			write_sequnlock_bh(&hh->hh_lock);
		}
	}
}
/* Generic update routine.
   -- lladdr is new lladdr or NULL, if it is not supplied.
   -- new    is new state.
   -- flags
	NEIGH_UPDATE_F_OVERRIDE allows to override existing lladdr,
				if it is different.
	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect existing "connected"
				lladdr instead of overriding it
				if it is different.
	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows to override existing
				NTF_ROUTER flag.
	NEIGH_UPDATE_F_ISROUTER	indicates if the neighbour is known as
				a router.

   Caller MUST hold reference count on the entry.
 */
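
/* Illustrative call (not part of the original file): an administrative
 * replacement of an entry's link-layer address, much as the netlink path
 * below does. new_ha is a hypothetical MAC address buffer.
 *
 *	err = neigh_update(n, new_ha, NUD_PERMANENT,
 *			   NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN, 0);
 */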
static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
			  u8 new, u32 flags, u32 nlmsg_pid,
			  struct netlink_ext_ack *extack)
{
	bool ext_learn_change = false;
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev;
	int update_isrouter = 0;

	trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);

	write_lock_bh(&neigh->lock);

	dev    = neigh->dev;
	old    = neigh->nud_state;
	err    = -EPERM;

	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
	    (old & (NUD_NOARP | NUD_PERMANENT)))
		goto out;
	if (neigh->dead) {
		NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
		new = old;
		goto out;
	}

	ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);

	if (!(new & NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old & NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old & NUD_VALID;
		if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
		    (new & NUD_FAILED)) {
			neigh_invalidate(neigh);
			notify = 1;
		}
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (!dev->addr_len) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if ((old & NUD_VALID) &&
		    !memcmp(lladdr, neigh->ha, dev->addr_len))
			lladdr = neigh->ha;
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old & NUD_VALID)) {
			NL_SET_ERR_MSG(extack, "No link layer address given");
			goto out;
		}
		lladdr = neigh->ha;
	}

	/* Update confirmed timestamp for neighbour entry after we
	 * received ARP packet even if it doesn't change IP to MAC binding.
	 */
	if (new & NUD_CONNECTED)
		neigh->confirmed = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	if (old & NUD_VALID) {
		if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
			update_isrouter = 0;
			if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
			    (old & NUD_CONNECTED)) {
				lladdr = neigh->ha;
				new = NUD_STALE;
			} else
				goto out;
		} else {
			if (lladdr == neigh->ha && new == NUD_STALE &&
			    !(flags & NEIGH_UPDATE_F_ADMIN))
				new = old;
		}
	}

	/* Update timestamp only once we know we will make a change to the
	 * neighbour entry. Otherwise we risk to move the locktime window with
	 * noop updates and ignore relevant ARP updates.
	 */
	if (new != old || lladdr != neigh->ha)
		neigh->updated = jiffies;

	if (new != old) {
		neigh_del_timer(neigh);
		if (new & NUD_PROBE)
			atomic_set(&neigh->probes, 0);
		if (new & NUD_IN_TIMER)
			neigh_add_timer(neigh, (jiffies +
						((new & NUD_REACHABLE) ?
						 neigh->parms->reachable_time :
						 0)));
		neigh->nud_state = new;
		notify = 1;
	}

	if (lladdr != neigh->ha) {
		write_seqlock(&neigh->ha_lock);
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		write_sequnlock(&neigh->ha_lock);
		neigh_update_hhs(neigh);
		if (!(new & NUD_CONNECTED))
			neigh->confirmed = jiffies -
				      (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
		notify = 1;
	}
	if (new == old)
		goto out;
	if (new & NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	if (!(old & NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state & NUD_VALID &&
		       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct dst_entry *dst = skb_dst(skb);
			struct neighbour *n2, *n1 = neigh;
			write_unlock_bh(&neigh->lock);

			rcu_read_lock();

			/* Why not just use 'neigh' as-is?  The problem is that
			 * things such as shaper, eql, and sch_teql can end up
			 * using alternative, different, neigh objects to output
			 * the packet in the output path.  So what we need to do
			 * here is re-lookup the top-level neigh in the path so
			 * we can reinject the packet there.
			 */
			n2 = NULL;
			if (dst) {
				n2 = dst_neigh_lookup_skb(dst, skb);
				if (n2)
					n1 = n2;
			}
			n1->output(n1, skb);
			if (n2)
				neigh_release(n2);
			rcu_read_unlock();

			write_lock_bh(&neigh->lock);
		}
		__skb_queue_purge(&neigh->arp_queue);
		neigh->arp_queue_len_bytes = 0;
	}
out:
	if (update_isrouter)
		neigh_update_is_router(neigh, flags, &notify);
	write_unlock_bh(&neigh->lock);

	if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
		neigh_update_gc_list(neigh);

	if (notify)
		neigh_update_notify(neigh, nlmsg_pid);

	trace_neigh_update_done(neigh, err);

	return err;
}

int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
		 u32 flags, u32 nlmsg_pid)
{
	return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
}
EXPORT_SYMBOL(neigh_update);
/* Update the neigh to listen temporarily for probe responses, even if it is
 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
 */
void __neigh_set_probe_once(struct neighbour *neigh)
{
	if (neigh->dead)
		return;
	neigh->updated = jiffies;
	if (!(neigh->nud_state & NUD_FAILED))
		return;
	neigh->nud_state = NUD_INCOMPLETE;
	atomic_set(&neigh->probes, neigh_max_probes(neigh));
	neigh_add_timer(neigh,
			jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
				      HZ/100));
}
EXPORT_SYMBOL(__neigh_set_probe_once);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
				 u8 *lladdr, void *saddr,
				 struct net_device *dev)
{
	struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
						 lladdr || !dev->addr_len);
	if (neigh)
		neigh_update(neigh, lladdr, NUD_STALE,
			     NEIGH_UPDATE_F_OVERRIDE, 0);
	return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
/* called with read_lock_bh(&n->lock); */
static void neigh_hh_init(struct neighbour *n)
{
	struct net_device *dev = n->dev;
	__be16 prot = n->tbl->protocol;
	struct hh_cache	*hh = &n->hh;

	write_lock_bh(&n->lock);

	/* Only one thread can come in here and initialize the
	 * hh_cache entry.
	 */
	if (!hh->hh_len)
		dev->header_ops->cache(n, hh, prot);

	write_unlock_bh(&n->lock);
}
/* Slow and careful. */

int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
{
	int rc = 0;

	if (!neigh_event_send(neigh, skb)) {
		int err;
		struct net_device *dev = neigh->dev;
		unsigned int seq;

		if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
			neigh_hh_init(neigh);

		do {
			__skb_pull(skb, skb_network_offset(skb));
			seq = read_seqbegin(&neigh->ha_lock);
			err = dev_hard_header(skb, dev, ntohs(skb->protocol),
					      neigh->ha, NULL, skb->len);
		} while (read_seqretry(&neigh->ha_lock, seq));

		if (err >= 0)
			rc = dev_queue_xmit(skb);
		else
			goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	rc = -EINVAL;
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
/* As fast as possible without hh cache */

int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
{
	struct net_device *dev = neigh->dev;
	unsigned int seq;
	int err;

	do {
		__skb_pull(skb, skb_network_offset(skb));
		seq = read_seqbegin(&neigh->ha_lock);
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      neigh->ha, NULL, skb->len);
	} while (read_seqretry(&neigh->ha_lock, seq));

	if (err >= 0)
		err = dev_queue_xmit(skb);
	else {
		err = -EINVAL;
		kfree_skb(skb);
	}
	return err;
}
EXPORT_SYMBOL(neigh_connected_output);
int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_direct_output);
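
/* Note (illustrative, not part of the original file): these are the three
 * tiers of the output path. neigh_resolve_output() may still have to
 * resolve the address, neigh_connected_output() only rebuilds the link
 * layer header, and neigh_direct_output() is for devices needing no
 * header at all; neigh_suspect()/neigh_connect() above switch an entry
 * between the first two via neigh->output.
 */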
static void neigh_proxy_process(struct timer_list *t)
{
	struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
	long sched_next = 0;
	unsigned long now = jiffies;
	struct sk_buff *skb, *n;

	spin_lock(&tbl->proxy_queue.lock);

	skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
		long tdif = NEIGH_CB(skb)->sched_next - now;

		if (tdif <= 0) {
			struct net_device *dev = skb->dev;

			__skb_unlink(skb, &tbl->proxy_queue);
			if (tbl->proxy_redo && netif_running(dev)) {
				rcu_read_lock();
				tbl->proxy_redo(skb);
				rcu_read_unlock();
			} else {
				kfree_skb(skb);
			}

			dev_put(dev);
		} else if (!sched_next || tdif < sched_next)
			sched_next = tdif;
	}
	del_timer(&tbl->proxy_timer);
	if (sched_next)
		mod_timer(&tbl->proxy_timer, jiffies + sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
		    struct sk_buff *skb)
{
	unsigned long now = jiffies;

	unsigned long sched_next = now + (prandom_u32() %
					  NEIGH_VAR(p, PROXY_DELAY));

	if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
		kfree_skb(skb);
		return;
	}

	NEIGH_CB(skb)->sched_next = sched_next;
	NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;

	spin_lock(&tbl->proxy_queue.lock);
	if (del_timer(&tbl->proxy_timer)) {
		if (time_before(tbl->proxy_timer.expires, sched_next))
			sched_next = tbl->proxy_timer.expires;
	}
	skb_dst_drop(skb);
	dev_hold(skb->dev);
	__skb_queue_tail(&tbl->proxy_queue, skb);
	mod_timer(&tbl->proxy_timer, sched_next);
	spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
						     struct net *net, int ifindex)
{
	struct neigh_parms *p;

	list_for_each_entry(p, &tbl->parms_list, list) {
		if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
		    (!p->dev && !ifindex && net_eq(net, &init_net)))
			return p;
	}

	return NULL;
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
				      struct neigh_table *tbl)
{
	struct neigh_parms *p;
	struct net *net = dev_net(dev);
	const struct net_device_ops *ops = dev->netdev_ops;

	p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
	if (p) {
		p->tbl		  = tbl;
		refcount_set(&p->refcnt, 1);
		p->reachable_time =
				neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
		dev_hold(dev);
		p->dev = dev;
		write_pnet(&p->net, net);
		p->sysctl_table = NULL;

		if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
			dev_put(dev);
			kfree(p);
			return NULL;
		}

		write_lock_bh(&tbl->lock);
		list_add(&p->list, &tbl->parms.list);
		write_unlock_bh(&tbl->lock);

		neigh_parms_data_state_cleanall(p);
	}
	return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
static void neigh_rcu_free_parms(struct rcu_head *head)
{
	struct neigh_parms *parms =
		container_of(head, struct neigh_parms, rcu_head);

	neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
	if (!parms || parms == &tbl->parms)
		return;
	write_lock_bh(&tbl->lock);
	list_del(&parms->list);
	parms->dead = 1;
	write_unlock_bh(&tbl->lock);
	dev_put(parms->dev);
	call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
static void neigh_parms_destroy(struct neigh_parms *parms)
{
	kfree(parms);
}

static struct lock_class_key neigh_table_proxy_queue_class;

static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly;
void neigh_table_init(int index, struct neigh_table *tbl)
{
	unsigned long now = jiffies;
	unsigned long phsize;

	INIT_LIST_HEAD(&tbl->parms_list);
	INIT_LIST_HEAD(&tbl->gc_list);
	list_add(&tbl->parms.list, &tbl->parms_list);
	write_pnet(&tbl->parms.net, &init_net);
	refcount_set(&tbl->parms.refcnt, 1);
	tbl->parms.reachable_time =
			  neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));

	tbl->stats = alloc_percpu(struct neigh_statistics);
	if (!tbl->stats)
		panic("cannot create neighbour cache statistics");

#ifdef CONFIG_PROC_FS
	if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
			      &neigh_stat_seq_ops, tbl))
		panic("cannot create neighbour proc dir entry");
#endif

	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));

	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);

	if (!tbl->nht || !tbl->phash_buckets)
		panic("cannot allocate neighbour cache hashes");

	if (!tbl->entry_size)
		tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
					tbl->key_len, NEIGH_PRIV_ALIGN);
	else
		WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);

	rwlock_init(&tbl->lock);
	INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
	queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
			tbl->parms.reachable_time);
	timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
	skb_queue_head_init_class(&tbl->proxy_queue,
			&neigh_table_proxy_queue_class);

	tbl->last_flush = now;
	tbl->last_rand	= now + tbl->parms.reachable_time * 20;

	neigh_tables[index] = tbl;
}
EXPORT_SYMBOL(neigh_table_init);
int neigh_table_clear(int index, struct neigh_table *tbl)
{
	neigh_tables[index] = NULL;
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");

	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
		 neigh_hash_free_rcu);
	tbl->nht = NULL;

	kfree(tbl->phash_buckets);
	tbl->phash_buckets = NULL;

	remove_proc_entry(tbl->id, init_net.proc_net_stat);

	free_percpu(tbl->stats);
	tbl->stats = NULL;

	return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
static struct neigh_table *neigh_find_table(int family)
{
	struct neigh_table *tbl = NULL;

	switch (family) {
	case AF_INET:
		tbl = neigh_tables[NEIGH_ARP_TABLE];
		break;
	case AF_INET6:
		tbl = neigh_tables[NEIGH_ND_TABLE];
		break;
	case AF_DECnet:
		tbl = neigh_tables[NEIGH_DN_TABLE];
		break;
	}

	return tbl;
}
const struct nla_policy nda_policy[NDA_MAX+1] = {
	[NDA_UNSPEC]		= { .strict_start_type = NDA_NH_ID },
	[NDA_DST]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_LLADDR]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[NDA_CACHEINFO]		= { .len = sizeof(struct nda_cacheinfo) },
	[NDA_PROBES]		= { .type = NLA_U32 },
	[NDA_VLAN]		= { .type = NLA_U16 },
	[NDA_PORT]		= { .type = NLA_U16 },
	[NDA_VNI]		= { .type = NLA_U32 },
	[NDA_IFINDEX]		= { .type = NLA_U32 },
	[NDA_MASTER]		= { .type = NLA_U32 },
	[NDA_PROTOCOL]		= { .type = NLA_U8 },
	[NDA_NH_ID]		= { .type = NLA_U32 },
};
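
/* Illustrative note (not part of the original file): this policy
 * validates RTM_NEWNEIGH/RTM_DELNEIGH requests such as the one iproute2
 * builds for
 *
 *	ip neigh replace 192.0.2.1 lladdr 00:11:22:33:44:55 dev eth0
 *
 * which carries NDA_DST (the 192.0.2.1 key) and NDA_LLADDR (the MAC).
 */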
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *dst_attr;
	struct neigh_table *tbl;
	struct neighbour *neigh;
	struct net_device *dev = NULL;
	int err = -EINVAL;

	ASSERT_RTNL();
	if (nlmsg_len(nlh) < sizeof(*ndm))
		goto out;

	dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
	if (!dst_attr) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(dst_attr) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	if (ndm->ndm_flags & NTF_PROXY) {
		err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
		goto out;
	}

	if (dev == NULL)
		goto out;

	neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
	if (neigh == NULL) {
		err = -ENOENT;
		goto out;
	}

	err = __neigh_update(neigh, NULL, NUD_FAILED,
			     NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
			     NETLINK_CB(skb).portid, extack);
	write_lock_bh(&tbl->lock);
	neigh_release(neigh);
	neigh_remove_one(neigh, tbl);
	write_unlock_bh(&tbl->lock);

out:
	return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE |
		NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
	struct net *net = sock_net(skb->sk);
	struct ndmsg *ndm;
	struct nlattr *tb[NDA_MAX+1];
	struct neigh_table *tbl;
	struct net_device *dev = NULL;
	struct neighbour *neigh;
	void *dst, *lladdr;
	u8 protocol = 0;
	int err;

	ASSERT_RTNL();
	err = nlmsg_parse_deprecated(nlh, sizeof(*ndm), tb, NDA_MAX,
				     nda_policy, extack);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (!tb[NDA_DST]) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		goto out;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_ifindex) {
		dev = __dev_get_by_index(net, ndm->ndm_ifindex);
		if (dev == NULL) {
			err = -ENODEV;
			goto out;
		}

		if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
			NL_SET_ERR_MSG(extack, "Invalid link address");
			goto out;
		}
	}

	tbl = neigh_find_table(ndm->ndm_family);
	if (tbl == NULL)
		return -EAFNOSUPPORT;

	if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
		NL_SET_ERR_MSG(extack, "Invalid network address");
		goto out;
	}

	dst = nla_data(tb[NDA_DST]);
	lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;

	if (tb[NDA_PROTOCOL])
		protocol = nla_get_u8(tb[NDA_PROTOCOL]);

	if (ndm->ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		err = -ENOBUFS;
		pn = pneigh_lookup(tbl, net, dst, dev, 1);
		if (pn) {
			pn->flags = ndm->ndm_flags;
			if (protocol)
				pn->protocol = protocol;
			err = 0;
		}
		goto out;
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "Device not specified");
		goto out;
	}

	if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
		err = -EINVAL;
		goto out;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (neigh == NULL) {
		bool exempt_from_gc;

		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto out;
		}

		exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
				 ndm->ndm_flags & NTF_EXT_LEARNED;
		neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
		if (IS_ERR(neigh)) {
			err = PTR_ERR(neigh);
			goto out;
		}
	} else {
		if (nlh->nlmsg_flags & NLM_F_EXCL) {
			err = -EEXIST;
			neigh_release(neigh);
			goto out;
		}

		if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
			flags &= ~(NEIGH_UPDATE_F_OVERRIDE |
				   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
	}

	if (protocol)
		neigh->protocol = protocol;

	if (ndm->ndm_flags & NTF_EXT_LEARNED)
		flags |= NEIGH_UPDATE_F_EXT_LEARNED;

	if (ndm->ndm_flags & NTF_ROUTER)
		flags |= NEIGH_UPDATE_F_ISROUTER;

	if (ndm->ndm_flags & NTF_USE) {
		neigh_event_send(neigh, NULL);
		err = 0;
	} else
		err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
				     NETLINK_CB(skb).portid, extack);

	neigh_release(neigh);

out:
	return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, NDTA_PARMS);
	if (nest == NULL)
		return -ENOBUFS;

	if ((parms->dev &&
	     nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
	    nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
	    nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
	    /* approximative value for deprecated QUEUE_LEN (in packets) */
	    nla_put_u32(skb, NDTPA_QUEUE_LEN,
			NEIGH_VAR(parms, QUEUE_LEN_BYTES) / SKB_TRUESIZE(ETH_FRAME_LEN)) ||
	    nla_put_u32(skb, NDTPA_PROXY_QLEN, NEIGH_VAR(parms, PROXY_QLEN)) ||
	    nla_put_u32(skb, NDTPA_APP_PROBES, NEIGH_VAR(parms, APP_PROBES)) ||
	    nla_put_u32(skb, NDTPA_UCAST_PROBES,
			NEIGH_VAR(parms, UCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_PROBES,
			NEIGH_VAR(parms, MCAST_PROBES)) ||
	    nla_put_u32(skb, NDTPA_MCAST_REPROBES,
			NEIGH_VAR(parms, MCAST_REPROBES)) ||
	    nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time,
			  NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
			  NEIGH_VAR(parms, BASE_REACHABLE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_GC_STALETIME,
			  NEIGH_VAR(parms, GC_STALETIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
			  NEIGH_VAR(parms, DELAY_PROBE_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_RETRANS_TIME,
			  NEIGH_VAR(parms, RETRANS_TIME), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_ANYCAST_DELAY,
			  NEIGH_VAR(parms, ANYCAST_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_PROXY_DELAY,
			  NEIGH_VAR(parms, PROXY_DELAY), NDTPA_PAD) ||
	    nla_put_msecs(skb, NDTPA_LOCKTIME,
			  NEIGH_VAR(parms, LOCKTIME), NDTPA_PAD))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
			      u32 pid, u32 seq, int type, int flags)
{
	struct nlmsghdr *nlh;
	struct ndtmsg *ndtmsg;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
	    nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
	    nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
	    nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
	    nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
		goto nla_put_failure;
	{
		unsigned long now = jiffies;
		long flush_delta = now - tbl->last_flush;
		long rand_delta = now - tbl->last_rand;
		struct neigh_hash_table *nht;
		struct ndt_config ndc = {
			.ndtc_key_len		= tbl->key_len,
			.ndtc_entry_size	= tbl->entry_size,
			.ndtc_entries		= atomic_read(&tbl->entries),
			.ndtc_last_flush	= jiffies_to_msecs(flush_delta),
			.ndtc_last_rand		= jiffies_to_msecs(rand_delta),
			.ndtc_proxy_qlen	= tbl->proxy_queue.qlen,
		};

		rcu_read_lock_bh();
		nht = rcu_dereference_bh(tbl->nht);
		ndc.ndtc_hash_rnd = nht->hash_rnd[0];
		ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
		rcu_read_unlock_bh();

		if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
			goto nla_put_failure;
	}

	{
		int cpu;
		struct ndt_stats ndst;

		memset(&ndst, 0, sizeof(ndst));

		for_each_possible_cpu(cpu) {
			struct neigh_statistics	*st;

			st = per_cpu_ptr(tbl->stats, cpu);
			ndst.ndts_allocs		+= st->allocs;
			ndst.ndts_destroys		+= st->destroys;
			ndst.ndts_hash_grows		+= st->hash_grows;
			ndst.ndts_res_failed		+= st->res_failed;
			ndst.ndts_lookups		+= st->lookups;
			ndst.ndts_hits			+= st->hits;
			ndst.ndts_rcv_probes_mcast	+= st->rcv_probes_mcast;
			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
			ndst.ndts_table_fulls		+= st->table_fulls;
		}

		if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
				  NDTA_PAD))
			goto nla_put_failure;
	}

	BUG_ON(tbl->parms.dev);
	if (neightbl_fill_parms(skb, &tbl->parms) < 0)
		goto nla_put_failure;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
				    struct neigh_table *tbl,
				    struct neigh_parms *parms,
				    u32 pid, u32 seq, int type,
				    unsigned int flags)
{
	struct ndtmsg *ndtmsg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndtmsg = nlmsg_data(nlh);

	read_lock_bh(&tbl->lock);
	ndtmsg->ndtm_family = tbl->family;
	ndtmsg->ndtm_pad1   = 0;
	ndtmsg->ndtm_pad2   = 0;

	if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
	    neightbl_fill_parms(skb, parms) < 0)
		goto errout;

	read_unlock_bh(&tbl->lock);
	nlmsg_end(skb, nlh);
	return 0;
errout:
	read_unlock_bh(&tbl->lock);
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
	[NDTA_NAME]		= { .type = NLA_STRING },
	[NDTA_THRESH1]		= { .type = NLA_U32 },
	[NDTA_THRESH2]		= { .type = NLA_U32 },
	[NDTA_THRESH3]		= { .type = NLA_U32 },
	[NDTA_GC_INTERVAL]	= { .type = NLA_U64 },
	[NDTA_PARMS]		= { .type = NLA_NESTED },
};

static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
	[NDTPA_IFINDEX]			= { .type = NLA_U32 },
	[NDTPA_QUEUE_LEN]		= { .type = NLA_U32 },
	[NDTPA_PROXY_QLEN]		= { .type = NLA_U32 },
	[NDTPA_APP_PROBES]		= { .type = NLA_U32 },
	[NDTPA_UCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_PROBES]		= { .type = NLA_U32 },
	[NDTPA_MCAST_REPROBES]		= { .type = NLA_U32 },
	[NDTPA_BASE_REACHABLE_TIME]	= { .type = NLA_U64 },
	[NDTPA_GC_STALETIME]		= { .type = NLA_U64 },
	[NDTPA_DELAY_PROBE_TIME]	= { .type = NLA_U64 },
	[NDTPA_RETRANS_TIME]		= { .type = NLA_U64 },
	[NDTPA_ANYCAST_DELAY]		= { .type = NLA_U64 },
	[NDTPA_PROXY_DELAY]		= { .type = NLA_U64 },
	[NDTPA_LOCKTIME]		= { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct neigh_table *tbl;
	struct ndtmsg *ndtmsg;
	struct nlattr *tb[NDTA_MAX+1];
	bool found = false;
	int err, tidx;

	err = nlmsg_parse_deprecated(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
				     nl_neightbl_policy, extack);
	if (err < 0)
		goto errout;

	if (tb[NDTA_NAME] == NULL) {
		err = -EINVAL;
		goto errout;
	}

	ndtmsg = nlmsg_data(nlh);

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;
		if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
			continue;
		if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	/*
	 * We acquire tbl->lock to be nice to the periodic timers and
	 * make sure they always see a consistent set of values.
	 */
	write_lock_bh(&tbl->lock);

	if (tb[NDTA_PARMS]) {
		struct nlattr *tbp[NDTPA_MAX+1];
		struct neigh_parms *p;
		int i, ifindex = 0;

		err = nla_parse_nested_deprecated(tbp, NDTPA_MAX,
						  tb[NDTA_PARMS],
						  nl_ntbl_parm_policy, extack);
		if (err < 0)
			goto errout_tbl_lock;

		if (tbp[NDTPA_IFINDEX])
			ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);

		p = lookup_neigh_parms(tbl, net, ifindex);
		if (p == NULL) {
			err = -ENOENT;
			goto errout_tbl_lock;
		}

		for (i = 1; i <= NDTPA_MAX; i++) {
			if (tbp[i] == NULL)
				continue;

			switch (i) {
			case NDTPA_QUEUE_LEN:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]) *
					      SKB_TRUESIZE(ETH_FRAME_LEN));
				break;
			case NDTPA_QUEUE_LENBYTES:
				NEIGH_VAR_SET(p, QUEUE_LEN_BYTES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_PROXY_QLEN:
				NEIGH_VAR_SET(p, PROXY_QLEN,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_APP_PROBES:
				NEIGH_VAR_SET(p, APP_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_UCAST_PROBES:
				NEIGH_VAR_SET(p, UCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_PROBES:
				NEIGH_VAR_SET(p, MCAST_PROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_MCAST_REPROBES:
				NEIGH_VAR_SET(p, MCAST_REPROBES,
					      nla_get_u32(tbp[i]));
				break;
			case NDTPA_BASE_REACHABLE_TIME:
				NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
					      nla_get_msecs(tbp[i]));
				/* update reachable_time as well, otherwise, the change will
				 * only be effective after the next time neigh_periodic_work
				 * decides to recompute it (can be multiple minutes)
				 */
				p->reachable_time =
					neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
				break;
			case NDTPA_GC_STALETIME:
				NEIGH_VAR_SET(p, GC_STALETIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_DELAY_PROBE_TIME:
				NEIGH_VAR_SET(p, DELAY_PROBE_TIME,
					      nla_get_msecs(tbp[i]));
				call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
				break;
			case NDTPA_RETRANS_TIME:
				NEIGH_VAR_SET(p, RETRANS_TIME,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_ANYCAST_DELAY:
				NEIGH_VAR_SET(p, ANYCAST_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_PROXY_DELAY:
				NEIGH_VAR_SET(p, PROXY_DELAY,
					      nla_get_msecs(tbp[i]));
				break;
			case NDTPA_LOCKTIME:
				NEIGH_VAR_SET(p, LOCKTIME,
					      nla_get_msecs(tbp[i]));
				break;
			}
		}
	}

	err = -ENOENT;
	if ((tb[NDTA_THRESH1] || tb[NDTA_THRESH2] ||
	     tb[NDTA_THRESH3] || tb[NDTA_GC_INTERVAL]) &&
	    !net_eq(net, &init_net))
		goto errout_tbl_lock;

	if (tb[NDTA_THRESH1])
		tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);

	if (tb[NDTA_THRESH2])
		tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);

	if (tb[NDTA_THRESH3])
		tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);

	if (tb[NDTA_GC_INTERVAL])
		tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);

	err = 0;

errout_tbl_lock:
	write_unlock_bh(&tbl->lock);
errout:
	return err;
}
static int neightbl_valid_dump_info(const struct nlmsghdr *nlh,
				    struct netlink_ext_ack *extack)
{
	struct ndtmsg *ndtm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor table dump request");
		return -EINVAL;
	}

	ndtm = nlmsg_data(nlh);
	if (ndtm->ndtm_pad1  || ndtm->ndtm_pad2) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor table dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ndtm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in neighbor table dump request");
		return -EINVAL;
	}

	return 0;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct net *net = sock_net(skb->sk);
	int family, tidx, nidx = 0;
	int tbl_skip = cb->args[0];
	int neigh_skip = cb->args[1];
	struct neigh_table *tbl;

	if (cb->strict_check) {
		int err = neightbl_valid_dump_info(nlh, cb->extack);

		if (err < 0)
			return err;
	}

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) {
		struct neigh_parms *p;

		tbl = neigh_tables[tidx];
		if (!tbl)
			continue;

		if (tidx < tbl_skip || (family && tbl->family != family))
			continue;

		if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
				       nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
				       NLM_F_MULTI) < 0)
			break;

		nidx = 0;
		p = list_next_entry(&tbl->parms, list);
		list_for_each_entry_from(p, &tbl->parms_list, list) {
			if (!net_eq(neigh_parms_net(p), net))
				continue;

			if (nidx < neigh_skip)
				goto next;

			if (neightbl_fill_param_info(skb, tbl, p,
						     NETLINK_CB(cb->skb).portid,
						     nlh->nlmsg_seq,
						     RTM_NEWNEIGHTBL,
						     NLM_F_MULTI) < 0)
				goto out;
		next:
			nidx++;
		}

		neigh_skip = 0;
	}
out:
	cb->args[0] = tidx;
	cb->args[1] = nidx;

	return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
			   u32 pid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = neigh->ops->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = neigh->flags;
	ndm->ndm_type	 = neigh->type;
	ndm->ndm_ifindex = neigh->dev->ifindex;

	if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
		goto nla_put_failure;

	read_lock_bh(&neigh->lock);
	ndm->ndm_state	 = neigh->nud_state;
	if (neigh->nud_state & NUD_VALID) {
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, neigh, neigh->dev);
		if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
			read_unlock_bh(&neigh->lock);
			goto nla_put_failure;
		}
	}

	ci.ndm_used	 = jiffies_to_clock_t(now - neigh->used);
	ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
	ci.ndm_updated	 = jiffies_to_clock_t(now - neigh->updated);
	ci.ndm_refcnt	 = refcount_read(&neigh->refcnt) - 1;
	read_unlock_bh(&neigh->lock);

	if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
	    nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
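
/* Illustrative mapping (not from this file): the attributes built here are
 * what "ip neigh show" prints; e.g. a message with NDA_DST 192.0.2.1,
 * NDA_LLADDR 00:11:22:33:44:55 and ndm_state NUD_REACHABLE renders as:
 *
 *	192.0.2.1 dev eth0 lladdr 00:11:22:33:44:55 REACHABLE
 */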
static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
			    u32 pid, u32 seq, int type, unsigned int flags,
			    struct neigh_table *tbl)
{
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = tbl->family;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
	ndm->ndm_state	 = NUD_NONE;

	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
		goto nla_put_failure;

	if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
{
	call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
	__neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
}
static bool neigh_master_filtered(struct net_device *dev, int master_idx)
{
	struct net_device *master;

	if (!master_idx)
		return false;

	master = dev ? netdev_master_upper_dev_get(dev) : NULL;
	if (!master || master->ifindex != master_idx)
		return true;

	return false;
}

static bool neigh_ifindex_filtered(struct net_device *dev, int filter_idx)
{
	if (filter_idx && (!dev || dev->ifindex != filter_idx))
		return true;

	return false;
}

struct neigh_dump_filter {
	int master_idx;
	int dev_idx;
};
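
/* Usage note (illustrative): NDA_IFINDEX and NDA_MASTER in a dump request
 * fill this struct; e.g. "ip neigh show dev eth0" or "ip neigh show master
 * br0" make the kernel skip non-matching entries and flag the reply with
 * NLM_F_DUMP_FILTERED.
 */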
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct neigh_dump_filter *filter)
{
	struct net *net = sock_net(skb->sk);
	struct neighbour *n;
	int rc, h, s_h = cb->args[1];
	int idx, s_idx = idx = cb->args[2];
	struct neigh_hash_table *nht;
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	for (h = s_h; h < (1 << nht->hash_shift); h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
		     n != NULL;
		     n = rcu_dereference_bh(n->next)) {
			if (idx < s_idx || !net_eq(dev_net(n->dev), net))
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    RTM_NEWNEIGH, flags) < 0) {
				rc = -1;
				goto out;
			}
next:
			idx++;
		}
	}
	rc = skb->len;
out:
	rcu_read_unlock_bh();
	cb->args[1] = h;
	cb->args[2] = idx;
	return rc;
}
static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct neigh_dump_filter *filter)
{
	struct pneigh_entry *n;
	struct net *net = sock_net(skb->sk);
	int rc, h, s_h = cb->args[3];
	int idx, s_idx = idx = cb->args[4];
	unsigned int flags = NLM_F_MULTI;

	if (filter->dev_idx || filter->master_idx)
		flags |= NLM_F_DUMP_FILTERED;

	read_lock_bh(&tbl->lock);

	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
		if (h > s_h)
			s_idx = 0;
		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
			if (idx < s_idx || pneigh_net(n) != net)
				goto next;
			if (neigh_ifindex_filtered(n->dev, filter->dev_idx) ||
			    neigh_master_filtered(n->dev, filter->master_idx))
				goto next;
			if (pneigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH, flags, tbl) < 0) {
				read_unlock_bh(&tbl->lock);
				rc = -1;
				goto out;
			}
		next:
			idx++;
		}
	}

	read_unlock_bh(&tbl->lock);
	rc = skb->len;
out:
	cb->args[3] = h;
	cb->args[4] = idx;
	return rc;
}
static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
				bool strict_check,
				struct neigh_dump_filter *filter,
				struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (strict_check) {
		if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
			NL_SET_ERR_MSG(extack, "Invalid header for neighbor dump request");
			return -EINVAL;
		}

		ndm = nlmsg_data(nlh);
		if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
		    ndm->ndm_state || ndm->ndm_type) {
			NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
			return -EINVAL;
		}

		if (ndm->ndm_flags & ~NTF_PROXY) {
			NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
			return -EINVAL;
		}

		err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg),
						    tb, NDA_MAX, nda_policy,
						    extack);
	} else {
		err = nlmsg_parse_deprecated(nlh, sizeof(struct ndmsg), tb,
					     NDA_MAX, nda_policy, extack);
	}
	if (err < 0)
		return err;

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		/* all new attributes should require strict_check */
		switch (i) {
		case NDA_IFINDEX:
			filter->dev_idx = nla_get_u32(tb[i]);
			break;
		case NDA_MASTER:
			filter->master_idx = nla_get_u32(tb[i]);
			break;
		default:
			if (strict_check) {
				NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor dump request");
				return -EINVAL;
			}
		}
	}

	return 0;
}
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct neigh_dump_filter filter = {};
	struct neigh_table *tbl;
	int t, family, s_t;
	int proxy = 0;
	int err;

	family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;

	/* check for full ndmsg structure presence, family member is
	 * the same for both structures
	 */
	if (nlmsg_len(nlh) >= sizeof(struct ndmsg) &&
	    ((struct ndmsg *)nlmsg_data(nlh))->ndm_flags == NTF_PROXY)
		proxy = 1;

	err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, cb->extack);
	if (err < 0 && cb->strict_check)
		return err;

	s_t = cb->args[0];

	for (t = 0; t < NEIGH_NR_TABLES; t++) {
		tbl = neigh_tables[t];

		if (!tbl)
			continue;
		if (t < s_t || (family && tbl->family != family))
			continue;
		if (t > s_t)
			memset(&cb->args[1], 0, sizeof(cb->args) -
						sizeof(cb->args[0]));
		if (proxy)
			err = pneigh_dump_table(tbl, skb, cb, &filter);
		else
			err = neigh_dump_table(tbl, skb, cb, &filter);
		if (err < 0)
			break;
	}

	cb->args[0] = t;
	return skb->len;
}
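
/* Note: the proxy check above means a dump request whose ndmsg header has
 * ndm_flags == NTF_PROXY (e.g. "ip neigh show proxy") walks the pneigh
 * tables, while a plain dump walks the regular neighbour hash tables.
 */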
static int neigh_valid_get_req(const struct nlmsghdr *nlh,
			       struct neigh_table **tbl,
			       void **dst, int *dev_idx, u8 *ndm_flags,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[NDA_MAX + 1];
	struct ndmsg *ndm;
	int err, i;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
		NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
		return -EINVAL;
	}

	ndm = nlmsg_data(nlh);
	if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
	    ndm->ndm_type) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
		return -EINVAL;
	}

	if (ndm->ndm_flags & ~NTF_PROXY) {
		NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct ndmsg), tb,
					    NDA_MAX, nda_policy, extack);
	if (err < 0)
		return err;

	*ndm_flags = ndm->ndm_flags;
	*dev_idx = ndm->ndm_ifindex;
	*tbl = neigh_find_table(ndm->ndm_family);
	if (*tbl == NULL) {
		NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
		return -EAFNOSUPPORT;
	}

	for (i = 0; i <= NDA_MAX; ++i) {
		if (!tb[i])
			continue;

		switch (i) {
		case NDA_DST:
			if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
				NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
				return -EINVAL;
			}
			*dst = nla_data(tb[i]);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
			return -EINVAL;
		}
	}

	return 0;
}
static inline size_t neigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
	       + nla_total_size(sizeof(struct nda_cacheinfo))
	       + nla_total_size(4)  /* NDA_PROBES */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
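
/* Worked example (assuming MAX_ADDR_LEN == 32 and 4-byte attribute
 * headers): 12 (ndmsg) + 36 (NDA_DST) + 36 (NDA_LLADDR) +
 * 20 (NDA_CACHEINFO, 16-byte payload) + 8 (NDA_PROBES) + 8 (NDA_PROTOCOL)
 * = 120 bytes reserved per notification.
 */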
static int neigh_get_reply(struct net *net, struct neighbour *neigh,
			   u32 pid, u32 seq)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}
static inline size_t pneigh_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
	       + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
	       + nla_total_size(1); /* NDA_PROTOCOL */
}
static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
			    u32 pid, u32 seq, struct neigh_table *tbl)
{
	struct sk_buff *skb;
	int err = 0;

	skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
	if (err) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, pid);
errout:
	return err;
}
static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
		     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct net_device *dev = NULL;
	struct neigh_table *tbl = NULL;
	struct neighbour *neigh;
	void *dst = NULL;
	u8 ndm_flags = 0;
	int dev_idx = 0;
	int err;

	err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
				  extack);
	if (err < 0)
		return err;

	if (dev_idx) {
		dev = __dev_get_by_index(net, dev_idx);
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Unknown device ifindex");
			return -ENODEV;
		}
	}

	if (!dst) {
		NL_SET_ERR_MSG(extack, "Network address not specified");
		return -EINVAL;
	}

	if (ndm_flags & NTF_PROXY) {
		struct pneigh_entry *pn;

		pn = pneigh_lookup(tbl, net, dst, dev, 0);
		if (!pn) {
			NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
			return -ENOENT;
		}
		return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
					nlh->nlmsg_seq, tbl);
	}

	if (!dev) {
		NL_SET_ERR_MSG(extack, "No device specified");
		return -EINVAL;
	}

	neigh = neigh_lookup(tbl, dst, dev);
	if (!neigh) {
		NL_SET_ERR_MSG(extack, "Neighbour entry not found");
		return -ENOENT;
	}

	err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
			      nlh->nlmsg_seq);

	neigh_release(neigh);

	return err;
}
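
/* Usage note (illustrative): this handler serves targeted lookups such as
 *
 *	ip neigh get 192.0.2.1 dev eth0
 *
 * i.e. RTM_GETNEIGH without NLM_F_DUMP.
 */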
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
	int chain;
	struct neigh_hash_table *nht;

	rcu_read_lock_bh();
	nht = rcu_dereference_bh(tbl->nht);

	read_lock(&tbl->lock); /* avoid resizes */
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;

		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
		     n != NULL;
		     n = rcu_dereference_bh(n->next))
			cb(n, cookie);
	}
	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
			      int (*cb)(struct neighbour *))
{
	int chain;
	struct neigh_hash_table *nht;

	nht = rcu_dereference_protected(tbl->nht,
					lockdep_is_held(&tbl->lock));
	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
		struct neighbour *n;
		struct neighbour __rcu **np;

		np = &nht->hash_buckets[chain];
		while ((n = rcu_dereference_protected(*np,
					lockdep_is_held(&tbl->lock))) != NULL) {
			int release;

			write_lock(&n->lock);
			release = cb(n);
			if (release) {
				rcu_assign_pointer(*np,
					rcu_dereference_protected(n->next,
						lockdep_is_held(&tbl->lock)));
				neigh_mark_dead(n);
			} else
				np = &n->next;
			write_unlock(&n->lock);
			if (release)
				neigh_cleanup_and_release(n);
		}
	}
}
EXPORT_SYMBOL(__neigh_for_each_release);
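
/* Usage note: cb() is invoked with n->lock held and returns nonzero to
 * have the entry unlinked and released; in-tree users such as
 * net/atm/clip.c use this to purge entries they no longer want.
 */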
int neigh_xmit(int index, struct net_device *dev,
	       const void *addr, struct sk_buff *skb)
{
	int err = -EAFNOSUPPORT;
	if (likely(index < NEIGH_NR_TABLES)) {
		struct neigh_table *tbl;
		struct neighbour *neigh;

		tbl = neigh_tables[index];
		if (!tbl)
			goto out;
		rcu_read_lock_bh();
		if (index == NEIGH_ARP_TABLE) {
			u32 key = *((u32 *)addr);

			neigh = __ipv4_neigh_lookup_noref(dev, key);
		} else {
			neigh = __neigh_lookup_noref(tbl, addr, dev);
		}
		if (!neigh)
			neigh = __neigh_create(tbl, addr, dev, false);
		err = PTR_ERR(neigh);
		if (IS_ERR(neigh)) {
			rcu_read_unlock_bh();
			goto out_kfree_skb;
		}
		err = neigh->output(neigh, skb);
		rcu_read_unlock_bh();
	}
	else if (index == NEIGH_LINK_TABLE) {
		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
				      addr, NULL, skb->len);
		if (err < 0)
			goto out_kfree_skb;
		err = dev_queue_xmit(skb);
	}
out:
	return err;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(neigh_xmit);
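
/* Caller note (illustrative): lightweight-tunnel style senders, e.g. the
 * MPLS output path, call neigh_xmit() with a table index (NEIGH_ARP_TABLE,
 * NEIGH_ND_TABLE) plus the next-hop address, or with NEIGH_LINK_TABLE and
 * a raw link-layer address to bypass resolution entirely.
 */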
#ifdef CONFIG_PROC_FS

static struct neighbour *neigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;
	struct neighbour *n = NULL;
	int bucket;

	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
		n = rcu_dereference_bh(nht->hash_buckets[bucket]);

		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				loff_t fakep = 0;
				void *v;

				v = state->neigh_sub_iter(state, n, &fakep);
				if (!v)
					goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;
			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;
	}
	state->bucket = bucket;

	return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
					struct neighbour *n,
					loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_hash_table *nht = state->nht;

	if (state->neigh_sub_iter) {
		void *v = state->neigh_sub_iter(state, n, pos);
		if (v)
			return n;
	}
	n = rcu_dereference_bh(n->next);

	while (1) {
		while (n) {
			if (!net_eq(dev_net(n->dev), net))
				goto next;
			if (state->neigh_sub_iter) {
				void *v = state->neigh_sub_iter(state, n, pos);
				if (v)
					return n;
				goto next;
			}
			if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
				break;

			if (n->nud_state & ~NUD_NOARP)
				break;
next:
			n = rcu_dereference_bh(n->next);
		}

		if (n)
			break;

		if (++state->bucket >= (1 << nht->hash_shift))
			break;

		n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
	}

	if (n && pos)
		--(*pos);
	return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct neighbour *n = neigh_get_first(seq);

	if (n) {
		--(*pos);
		while (*pos) {
			n = neigh_get_next(seq, n, pos);
			if (!n)
				break;
		}
	}
	return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;
	struct pneigh_entry *pn = NULL;
	int bucket = state->bucket;

	state->flags |= NEIGH_SEQ_IS_PNEIGH;
	for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
		pn = tbl->phash_buckets[bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}
	state->bucket = bucket;

	return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
					    struct pneigh_entry *pn,
					    loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	struct net *net = seq_file_net(seq);
	struct neigh_table *tbl = state->tbl;

	do {
		pn = pn->next;
	} while (pn && !net_eq(pneigh_net(pn), net));

	while (!pn) {
		if (++state->bucket > PNEIGH_HASHMASK)
			break;
		pn = tbl->phash_buckets[state->bucket];
		while (pn && !net_eq(pneigh_net(pn), net))
			pn = pn->next;
		if (pn)
			break;
	}

	if (pn && pos)
		--(*pos);

	return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct pneigh_entry *pn = pneigh_get_first(seq);

	if (pn) {
		--(*pos);
		while (*pos) {
			pn = pneigh_get_next(seq, pn, pos);
			if (!pn)
				break;
		}
	}
	return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
	struct neigh_seq_state *state = seq->private;
	void *rc;
	loff_t idxpos = *pos;

	rc = neigh_get_idx(seq, &idxpos);
	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
		rc = pneigh_get_idx(seq, &idxpos);

	return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
	__acquires(tbl->lock)
	__acquires(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;

	state->tbl = tbl;
	state->bucket = 0;
	state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);

	rcu_read_lock_bh();
	state->nht = rcu_dereference_bh(tbl->nht);
	read_lock(&tbl->lock);

	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
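
/* Usage note: protocol seq_file readers wrap these helpers; e.g. the
 * /proc/net/arp show path starts its walk with
 * neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_SKIP_NOARP).
 */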
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_seq_state *state;
	void *rc;

	if (v == SEQ_START_TOKEN) {
		rc = neigh_get_first(seq);
		goto out;
	}

	state = seq->private;
	if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
		rc = neigh_get_next(seq, v, NULL);
		if (rc)
			goto out;
		if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
			rc = pneigh_get_first(seq);
	} else {
		BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
		rc = pneigh_get_next(seq, v, NULL);
	}
out:
	++(*pos);
	return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
	__releases(tbl->lock)
	__releases(rcu_bh)
{
	struct neigh_seq_state *state = seq->private;
	struct neigh_table *tbl = state->tbl;

	read_unlock(&tbl->lock);
	rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */

static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	return NULL;
}

static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return per_cpu_ptr(tbl->stats, cpu);
	}
	(*pos)++;
	return NULL;
}

static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{

}

static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
	struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
	struct neigh_statistics *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
		return 0;
	}

	seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
			"%08lx %08lx %08lx %08lx %08lx %08lx\n",
		   atomic_read(&tbl->entries),

		   st->allocs,
		   st->destroys,
		   st->hash_grows,

		   st->lookups,
		   st->hits,

		   st->res_failed,

		   st->rcv_probes_mcast,
		   st->rcv_probes_ucast,

		   st->periodic_gc_runs,
		   st->forced_gc_runs,
		   st->unres_discards,
		   st->table_fulls
		   );

	return 0;
}
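
/* Sample output (illustrative): one header line plus one row per possible
 * CPU, e.g.
 *
 *	# cat /proc/net/stat/arp_cache
 *	entries allocs destroys hash_grows ...
 *	00000003 00000024 00000021 00000001 ...
 *
 * The "entries" column repeats the same table-wide count on every CPU row;
 * the remaining counters are per-CPU.
 */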
static const struct seq_operations neigh_stat_seq_ops = {
	.start	= neigh_stat_seq_start,
	.next	= neigh_stat_seq_next,
	.stop	= neigh_stat_seq_stop,
	.show	= neigh_stat_seq_show,
};
#endif /* CONFIG_PROC_FS */
static void __neigh_notify(struct neighbour *n, int type, int flags,
			   u32 pid)
{
	struct net *net = dev_net(n->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = neigh_fill_info(skb, n, pid, 0, type, flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

void neigh_app_ns(struct neighbour *n)
{
	__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST, 0);
}
EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);

static int proc_unres_qlen(struct ctl_table *ctl, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	int size, ret;
	struct ctl_table tmp = *ctl;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = &unres_qlen_max;
	tmp.data = &size;

	size = *(int *)ctl->data / SKB_TRUESIZE(ETH_FRAME_LEN);
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && !ret)
		*(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN);
	return ret;
}
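
/* Worked example: unres_qlen is presented in packets but stored in bytes.
 * With SKB_TRUESIZE(ETH_FRAME_LEN) on the order of ~2KB (the exact value
 * depends on struct sk_buff overhead), writing unres_qlen=101 stores
 * roughly 101 * SKB_TRUESIZE(ETH_FRAME_LEN) into unres_qlen_bytes, and
 * reads convert back by division.
 */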
static struct neigh_parms *neigh_get_dev_parms_rcu(struct net_device *dev,
						   int family)
{
	switch (family) {
	case AF_INET:
		return __in_dev_arp_parms_get_rcu(dev);
	case AF_INET6:
		return __in6_dev_nd_parms_get_rcu(dev);
	}
	return NULL;
}

static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p,
				  int index)
{
	struct net_device *dev;
	int family = neigh_parms_family(p);

	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		struct neigh_parms *dst_p =
				neigh_get_dev_parms_rcu(dev, family);

		if (dst_p && !test_bit(index, dst_p->data_state))
			dst_p->data[index] = p->data[index];
	}
	rcu_read_unlock();
}
static void neigh_proc_update(struct ctl_table *ctl, int write)
{
	struct net_device *dev = ctl->extra1;
	struct neigh_parms *p = ctl->extra2;
	struct net *net = neigh_parms_net(p);
	int index = (int *) ctl->data - p->data;

	if (!write)
		return;

	set_bit(index, p->data_state);
	if (index == NEIGH_VAR_DELAY_PROBE_TIME)
		call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
	if (!dev) /* NULL dev means this is default value */
		neigh_copy_dflt_parms(net, p, index);
}
static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
					   void *buffer, size_t *lenp,
					   loff_t *ppos)
{
	struct ctl_table tmp = *ctl;
	int ret;

	tmp.extra1 = SYSCTL_ZERO;
	tmp.extra2 = SYSCTL_INT_MAX;

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
			size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec);

int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
				size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);

static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
					      void *buffer, size_t *lenp,
					      loff_t *ppos)
{
	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}
EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);

static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);

	neigh_proc_update(ctl, write);
	return ret;
}

static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	struct neigh_parms *p = ctl->extra2;
	int ret;

	if (strcmp(ctl->procname, "base_reachable_time") == 0)
		ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
	else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
		ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
	else
		ret = -1;

	if (write && ret == 0) {
		/* update reachable_time as well, otherwise, the change will
		 * only be effective after the next time neigh_periodic_work
		 * decides to recompute it
		 */
		p->reachable_time =
			neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
	}
	return ret;
}
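
/* Usage note (illustrative): a write such as
 *
 *	echo 30000 > /proc/sys/net/ipv4/neigh/eth0/base_reachable_time_ms
 *
 * lands in this handler, which both stores the new base value and
 * refreshes p->reachable_time so the change is visible immediately.
 */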
#define NEIGH_PARMS_DATA_OFFSET(index)	\
	(&((struct neigh_parms *) 0)->data[index])

#define NEIGH_SYSCTL_ENTRY(attr, data_attr, name, mval, proc) \
	[NEIGH_VAR_ ## attr] = { \
		.procname	= name, \
		.data		= NEIGH_PARMS_DATA_OFFSET(NEIGH_VAR_ ## data_attr), \
		.maxlen		= sizeof(int), \
		.mode		= mval, \
		.proc_handler	= proc, \
	}

#define NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_zero_intmax)

#define NEIGH_SYSCTL_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_jiffies)

#define NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, attr, name, 0644, neigh_proc_dointvec_userhz_jiffies)

#define NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_ms_jiffies)

#define NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(attr, data_attr, name) \
	NEIGH_SYSCTL_ENTRY(attr, data_attr, name, 0644, neigh_proc_dointvec_unres_qlen)
static struct neigh_sysctl_table {
	struct ctl_table_header *sysctl_header;
	struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
} neigh_sysctl_template __read_mostly = {
	.neigh_vars = {
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_PROBES, "mcast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(UCAST_PROBES, "ucast_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(APP_PROBES, "app_solicit"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(MCAST_REPROBES, "mcast_resolicit"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(RETRANS_TIME, "retrans_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(BASE_REACHABLE_TIME, "base_reachable_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(DELAY_PROBE_TIME, "delay_first_probe_time"),
		NEIGH_SYSCTL_JIFFIES_ENTRY(GC_STALETIME, "gc_stale_time"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(QUEUE_LEN_BYTES, "unres_qlen_bytes"),
		NEIGH_SYSCTL_ZERO_INTMAX_ENTRY(PROXY_QLEN, "proxy_qlen"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(ANYCAST_DELAY, "anycast_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(PROXY_DELAY, "proxy_delay"),
		NEIGH_SYSCTL_USERHZ_JIFFIES_ENTRY(LOCKTIME, "locktime"),
		NEIGH_SYSCTL_UNRES_QLEN_REUSED_ENTRY(QUEUE_LEN, QUEUE_LEN_BYTES, "unres_qlen"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(RETRANS_TIME_MS, RETRANS_TIME, "retrans_time_ms"),
		NEIGH_SYSCTL_MS_JIFFIES_REUSED_ENTRY(BASE_REACHABLE_TIME_MS, BASE_REACHABLE_TIME, "base_reachable_time_ms"),
		[NEIGH_VAR_GC_INTERVAL] = {
			.procname	= "gc_interval",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec_jiffies,
		},
		[NEIGH_VAR_GC_THRESH1] = {
			.procname	= "gc_thresh1",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH2] = {
			.procname	= "gc_thresh2",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
		[NEIGH_VAR_GC_THRESH3] = {
			.procname	= "gc_thresh3",
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.extra1		= SYSCTL_ZERO,
			.extra2		= SYSCTL_INT_MAX,
			.proc_handler	= proc_dointvec_minmax,
		},
	},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
			  proc_handler *handler)
{
	int i;
	struct neigh_sysctl_table *t;
	const char *dev_name_source;
	char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
	char *p_name;

	t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
	if (!t)
		goto err;

	for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
		t->neigh_vars[i].data += (long) p;
		t->neigh_vars[i].extra1 = dev;
		t->neigh_vars[i].extra2 = p;
	}

	if (dev) {
		dev_name_source = dev->name;
		/* Terminate the table early */
		memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
		       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
	} else {
		struct neigh_table *tbl = p->tbl;
		dev_name_source = "default";
		t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
		t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
		t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
		t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
	}

	if (handler) {
		/* RetransTime */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler;
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler;
		/* RetransTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
	} else {
		/* Those handlers will update p->reachable_time after
		 * base_reachable_time(_ms) is set to ensure the new timer starts being
		 * applied after the next neighbour update instead of waiting for
		 * neigh_periodic_work to update its value (can be multiple minutes).
		 * So any handler that replaces them should do this as well.
		 */
		/* ReachableTime */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
			neigh_proc_base_reachable_time;
		/* ReachableTime (in milliseconds) */
		t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
			neigh_proc_base_reachable_time;
	}

	/* Don't export sysctls to unprivileged users */
	if (neigh_parms_net(p)->user_ns != &init_user_ns)
		t->neigh_vars[0].procname = NULL;

	switch (neigh_parms_family(p)) {
	case AF_INET:
		p_name = "ipv4";
		break;
	case AF_INET6:
		p_name = "ipv6";
		break;
	default:
		BUG();
	}

	snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
		 p_name, dev_name_source);
	t->sysctl_header =
		register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
	if (!t->sysctl_header)
		goto free;

	p->sysctl_table = t;
	return 0;

free:
	kfree(t);
err:
	return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
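
/* Resulting layout (illustrative): for AF_INET and device "eth0" the table
 * above is registered as net/ipv4/neigh/eth0/, giving entries such as
 * /proc/sys/net/ipv4/neigh/eth0/gc_stale_time; the dev == NULL ("default")
 * instance additionally exposes gc_interval and the three gc_thresh* knobs.
 */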
void neigh_sysctl_unregister(struct neigh_parms *p)
{
	if (p->sysctl_table) {
		struct neigh_sysctl_table *t = p->sysctl_table;
		p->sysctl_table = NULL;
		unregister_net_sysctl_table(t->sysctl_header);
		kfree(t);
	}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif	/* CONFIG_SYSCTL */

static int __init neigh_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
		      0);
	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0);

	return 0;
}

subsys_initcall(neigh_init);