net/bridge/br_fdb.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Forwarding database
4 * Linux ethernet bridge
5 *
6 * Authors:
7 * Lennert Buytenhek <buytenh@gnu.org>
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/init.h>
12 #include <linux/rculist.h>
13 #include <linux/spinlock.h>
14 #include <linux/times.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/jhash.h>
18 #include <linux/random.h>
19 #include <linux/slab.h>
20 #include <linux/atomic.h>
21 #include <asm/unaligned.h>
22 #include <linux/if_vlan.h>
23 #include <net/switchdev.h>
24 #include <trace/events/bridge.h>
25 #include "br_private.h"
26
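/* FDB entries are hashed on struct net_bridge_fdb_key, i.e. the
 * (MAC address, VLAN id) pair, so the same address may appear once
 * per VLAN.
 */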
27 static const struct rhashtable_params br_fdb_rht_params = {
28 .head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
29 .key_offset = offsetof(struct net_bridge_fdb_entry, key),
30 .key_len = sizeof(struct net_bridge_fdb_key),
31 .automatic_shrinking = true,
32 };
33
34 static struct kmem_cache *br_fdb_cache __read_mostly;
35 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
36 const unsigned char *addr, u16 vid);
37 static void fdb_notify(struct net_bridge *br,
38 const struct net_bridge_fdb_entry *, int, bool);
39
40 int __init br_fdb_init(void)
41 {
42 br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
43 sizeof(struct net_bridge_fdb_entry),
44 0,
45 SLAB_HWCACHE_ALIGN, NULL);
46 if (!br_fdb_cache)
47 return -ENOMEM;
48
49 return 0;
50 }
51
52 void br_fdb_fini(void)
53 {
54 kmem_cache_destroy(br_fdb_cache);
55 }
56
57 int br_fdb_hash_init(struct net_bridge *br)
58 {
59 return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
60 }
61
62 void br_fdb_hash_fini(struct net_bridge *br)
63 {
64 rhashtable_destroy(&br->fdb_hash_tbl);
65 }
66
67 /* if a topology change is in progress then use forward_delay (default 15 sec),
68 * otherwise keep entries for the ageing time (default 5 minutes)
69 */
70 static inline unsigned long hold_time(const struct net_bridge *br)
71 {
72 return br->topology_change ? br->forward_delay : br->ageing_time;
73 }
74
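/* An entry has expired when it is neither static nor externally
 * learned and has not been refreshed within hold_time().
 */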
75 static inline int has_expired(const struct net_bridge *br,
76 const struct net_bridge_fdb_entry *fdb)
77 {
78 return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
79 !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
80 time_before_eq(fdb->updated + hold_time(br), jiffies);
81 }
82
83 static void fdb_rcu_free(struct rcu_head *head)
84 {
85 struct net_bridge_fdb_entry *ent
86 = container_of(head, struct net_bridge_fdb_entry, rcu);
87 kmem_cache_free(br_fdb_cache, ent);
88 }
89
90 static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
91 const unsigned char *addr,
92 __u16 vid)
93 {
94 struct net_bridge_fdb_key key;
95
96 WARN_ON_ONCE(!rcu_read_lock_held());
97
98 key.vlan_id = vid;
99 memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
100
101 return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
102 }
103
104 /* requires bridge hash_lock */
105 static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
106 const unsigned char *addr,
107 __u16 vid)
108 {
109 struct net_bridge_fdb_entry *fdb;
110
111 lockdep_assert_held_once(&br->hash_lock);
112
113 rcu_read_lock();
114 fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
115 rcu_read_unlock();
116
117 return fdb;
118 }
119
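/* Look up the port device behind addr/vid on the given bridge.
 * Returns NULL if the address is unknown or belongs to the bridge
 * device itself (no destination port). Caller must hold RTNL.
 */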
120 struct net_device *br_fdb_find_port(const struct net_device *br_dev,
121 const unsigned char *addr,
122 __u16 vid)
123 {
124 struct net_bridge_fdb_entry *f;
125 struct net_device *dev = NULL;
126 struct net_bridge *br;
127
128 ASSERT_RTNL();
129
130 if (!netif_is_bridge_master(br_dev))
131 return NULL;
132
133 br = netdev_priv(br_dev);
134 rcu_read_lock();
135 f = br_fdb_find_rcu(br, addr, vid);
136 if (f && f->dst)
137 dev = f->dst->dev;
138 rcu_read_unlock();
139
140 return dev;
141 }
142 EXPORT_SYMBOL_GPL(br_fdb_find_port);
143
144 struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
145 const unsigned char *addr,
146 __u16 vid)
147 {
148 return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
149 }
150
151 /* When a static FDB entry is added, the mac address from the entry is
152 * added to the bridge private HW address list and all required ports
153 * are then updated with the new information.
154 * Called under RTNL.
155 */
156 static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
157 {
158 int err;
159 struct net_bridge_port *p;
160
161 ASSERT_RTNL();
162
163 list_for_each_entry(p, &br->port_list, list) {
164 if (!br_promisc_port(p)) {
165 err = dev_uc_add(p->dev, addr);
166 if (err)
167 goto undo;
168 }
169 }
170
171 return;
172 undo:
173 list_for_each_entry_continue_reverse(p, &br->port_list, list) {
174 if (!br_promisc_port(p))
175 dev_uc_del(p->dev, addr);
176 }
177 }
178
179 /* When a static FDB entry is deleted, the HW address from that entry is
180 * also removed from the bridge private HW address list and all the
181 * ports are updated with the needed information.
182 * Called under RTNL.
183 */
184 static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
185 {
186 struct net_bridge_port *p;
187
188 ASSERT_RTNL();
189
190 list_for_each_entry(p, &br->port_list, list) {
191 if (!br_promisc_port(p))
192 dev_uc_del(p->dev, addr);
193 }
194 }
195
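/* Unlink an entry from the rhashtable and the fdb_list, notify
 * userspace (and optionally switchdev) with RTM_DELNEIGH and free it
 * after an RCU grace period. Caller must hold hash_lock.
 */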
196 static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
197 bool swdev_notify)
198 {
199 trace_fdb_delete(br, f);
200
201 if (test_bit(BR_FDB_STATIC, &f->flags))
202 fdb_del_hw_addr(br, f->key.addr.addr);
203
204 hlist_del_init_rcu(&f->fdb_node);
205 rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
206 br_fdb_rht_params);
207 fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
208 call_rcu(&f->rcu, fdb_rcu_free);
209 }
210
211 /* Delete a local entry if no other port has the same address. */
212 static void fdb_delete_local(struct net_bridge *br,
213 const struct net_bridge_port *p,
214 struct net_bridge_fdb_entry *f)
215 {
216 const unsigned char *addr = f->key.addr.addr;
217 struct net_bridge_vlan_group *vg;
218 const struct net_bridge_vlan *v;
219 struct net_bridge_port *op;
220 u16 vid = f->key.vlan_id;
221
222 /* Maybe another port has same hw addr? */
223 list_for_each_entry(op, &br->port_list, list) {
224 vg = nbp_vlan_group(op);
225 if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
226 (!vid || br_vlan_find(vg, vid))) {
227 f->dst = op;
228 clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
229 return;
230 }
231 }
232
233 vg = br_vlan_group(br);
234 v = br_vlan_find(vg, vid);
235 /* Maybe bridge device has same hw addr? */
236 if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
237 (!vid || (v && br_vlan_should_use(v)))) {
238 f->dst = NULL;
239 clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
240 return;
241 }
242
243 fdb_delete(br, f, true);
244 }
245
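/* Delete the local (not user-added) entry for addr/vid if it still
 * points at port p.
 */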
246 void br_fdb_find_delete_local(struct net_bridge *br,
247 const struct net_bridge_port *p,
248 const unsigned char *addr, u16 vid)
249 {
250 struct net_bridge_fdb_entry *f;
251
252 spin_lock_bh(&br->hash_lock);
253 f = br_fdb_find(br, addr, vid);
254 if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
255 !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
256 fdb_delete_local(br, p, f);
257 spin_unlock_bh(&br->hash_lock);
258 }
259
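/* Called when a port's MAC address changes: drop the old local
 * entries that pointed at this port and insert fresh ones for the
 * new address, on VLAN 0 and on every VLAN configured on the port.
 */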
260 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
261 {
262 struct net_bridge_vlan_group *vg;
263 struct net_bridge_fdb_entry *f;
264 struct net_bridge *br = p->br;
265 struct net_bridge_vlan *v;
266
267 spin_lock_bh(&br->hash_lock);
268 vg = nbp_vlan_group(p);
269 hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
270 if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
271 !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
272 /* delete old one */
273 fdb_delete_local(br, p, f);
274
275 /* if this port has no vlan information
276 * configured, we can safely be done at
277 * this point.
278 */
279 if (!vg || !vg->num_vlans)
280 goto insert;
281 }
282 }
283
284 insert:
285 /* insert new address, may fail if invalid address or dup. */
286 fdb_insert(br, p, newaddr, 0);
287
288 if (!vg || !vg->num_vlans)
289 goto done;
290
291 /* Now add entries for every VLAN configured on the port.
292 * This function runs under RTNL so the bitmap will not change
293 * from under us.
294 */
295 list_for_each_entry(v, &vg->vlan_list, vlist)
296 fdb_insert(br, p, newaddr, v->vid);
297
298 done:
299 spin_unlock_bh(&br->hash_lock);
300 }
301
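/* Called when the bridge device's own MAC address changes: replace
 * the local entries that are not tied to any port, on VLAN 0 and on
 * every VLAN configured on the bridge.
 */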
302 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
303 {
304 struct net_bridge_vlan_group *vg;
305 struct net_bridge_fdb_entry *f;
306 struct net_bridge_vlan *v;
307
308 spin_lock_bh(&br->hash_lock);
309
310 /* If the old entry was not associated with any port, delete it. */
311 f = br_fdb_find(br, br->dev->dev_addr, 0);
312 if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
313 !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
314 fdb_delete_local(br, NULL, f);
315
316 fdb_insert(br, NULL, newaddr, 0);
317 vg = br_vlan_group(br);
318 if (!vg || !vg->num_vlans)
319 goto out;
320 /* Now remove and add entries for every VLAN configured on the
321 * bridge. This function runs under RTNL so the bitmap will not
322 * change from under us.
323 */
324 list_for_each_entry(v, &vg->vlan_list, vlist) {
325 if (!br_vlan_should_use(v))
326 continue;
327 f = br_fdb_find(br, br->dev->dev_addr, v->vid);
328 if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
329 !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
330 fdb_delete_local(br, NULL, f);
331 fdb_insert(br, NULL, newaddr, v->vid);
332 }
333 out:
334 spin_unlock_bh(&br->hash_lock);
335 }
336
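/* Garbage-collection worker: walk the FDB under RCU, expire dynamic
 * entries whose hold time has elapsed, emit activity notifications
 * for entries with BR_FDB_NOTIFY set, and re-arm the delayed work
 * for the next pending timeout.
 */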
337 void br_fdb_cleanup(struct work_struct *work)
338 {
339 struct net_bridge *br = container_of(work, struct net_bridge,
340 gc_work.work);
341 struct net_bridge_fdb_entry *f = NULL;
342 unsigned long delay = hold_time(br);
343 unsigned long work_delay = delay;
344 unsigned long now = jiffies;
345
346 /* this part is tricky: to avoid blocking learning (and consequently
347 * forwarding) we rely on RCU to delete objects with delayed freeing,
348 * which allows us to continue traversing the list
349 */
350 rcu_read_lock();
351 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
352 unsigned long this_timer = f->updated + delay;
353
354 if (test_bit(BR_FDB_STATIC, &f->flags) ||
355 test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
356 if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
357 if (time_after(this_timer, now))
358 work_delay = min(work_delay,
359 this_timer - now);
360 else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
361 &f->flags))
362 fdb_notify(br, f, RTM_NEWNEIGH, false);
363 }
364 continue;
365 }
366
367 if (time_after(this_timer, now)) {
368 work_delay = min(work_delay, this_timer - now);
369 } else {
370 spin_lock_bh(&br->hash_lock);
371 if (!hlist_unhashed(&f->fdb_node))
372 fdb_delete(br, f, true);
373 spin_unlock_bh(&br->hash_lock);
374 }
375 }
376 rcu_read_unlock();
377
378 /* Cleanup minimum 10 milliseconds apart */
379 work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
380 mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
381 }
382
383 /* Completely flush all dynamic entries in forwarding database.*/
384 void br_fdb_flush(struct net_bridge *br)
385 {
386 struct net_bridge_fdb_entry *f;
387 struct hlist_node *tmp;
388
389 spin_lock_bh(&br->hash_lock);
390 hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
391 if (!test_bit(BR_FDB_STATIC, &f->flags))
392 fdb_delete(br, f, true);
393 }
394 spin_unlock_bh(&br->hash_lock);
395 }
396
397 /* Flush all entries referring to a specific port.
398 * If do_all is set, also flush static entries.
399 * If vid is set, delete all entries that match the vlan_id.
400 */
401 void br_fdb_delete_by_port(struct net_bridge *br,
402 const struct net_bridge_port *p,
403 u16 vid,
404 int do_all)
405 {
406 struct net_bridge_fdb_entry *f;
407 struct hlist_node *tmp;
408
409 spin_lock_bh(&br->hash_lock);
410 hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
411 if (f->dst != p)
412 continue;
413
414 if (!do_all)
415 if (test_bit(BR_FDB_STATIC, &f->flags) ||
416 (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
417 !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
418 (vid && f->key.vlan_id != vid))
419 continue;
420
421 if (test_bit(BR_FDB_LOCAL, &f->flags))
422 fdb_delete_local(br, p, f);
423 else
424 fdb_delete(br, f, true);
425 }
426 spin_unlock_bh(&br->hash_lock);
427 }
428
429 #if IS_ENABLED(CONFIG_ATM_LANE)
430 /* Interface used by ATM LANE hook to test
431 * if an addr is on some other bridge port */
432 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
433 {
434 struct net_bridge_fdb_entry *fdb;
435 struct net_bridge_port *port;
436 int ret;
437
438 rcu_read_lock();
439 port = br_port_get_rcu(dev);
440 if (!port)
441 ret = 0;
442 else {
443 const struct net_bridge_port *dst = NULL;
444
445 fdb = br_fdb_find_rcu(port->br, addr, 0);
446 if (fdb)
447 dst = READ_ONCE(fdb->dst);
448
449 ret = dst && dst->dev != dev &&
450 dst->state == BR_STATE_FORWARDING;
451 }
452 rcu_read_unlock();
453
454 return ret;
455 }
456 #endif /* CONFIG_ATM_LANE */
457
458 /*
459 * Fill buffer with forwarding table records in
460 * the API format.
461 */
462 int br_fdb_fillbuf(struct net_bridge *br, void *buf,
463 unsigned long maxnum, unsigned long skip)
464 {
465 struct net_bridge_fdb_entry *f;
466 struct __fdb_entry *fe = buf;
467 int num = 0;
468
469 memset(buf, 0, maxnum*sizeof(struct __fdb_entry));
470
471 rcu_read_lock();
472 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
473 if (num >= maxnum)
474 break;
475
476 if (has_expired(br, f))
477 continue;
478
479 /* ignore pseudo entry for local MAC address */
480 if (!f->dst)
481 continue;
482
483 if (skip) {
484 --skip;
485 continue;
486 }
487
488 /* convert from internal format to API */
489 memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
490
491 /* due to ABI compat need to split into hi/lo */
492 fe->port_no = f->dst->port_no;
493 fe->port_hi = f->dst->port_no >> 8;
494
495 fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
496 if (!test_bit(BR_FDB_STATIC, &f->flags))
497 fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
498 ++fe;
499 ++num;
500 }
501 rcu_read_unlock();
502
503 return num;
504 }
505
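/* Allocate a new entry and link it into the rhashtable and the
 * fdb_list. Returns NULL on allocation failure or if an entry with
 * the same (MAC, VLAN) key already exists. Caller must hold
 * hash_lock.
 */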
506 static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
507 struct net_bridge_port *source,
508 const unsigned char *addr,
509 __u16 vid,
510 unsigned long flags)
511 {
512 struct net_bridge_fdb_entry *fdb;
513
514 fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
515 if (fdb) {
516 memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
517 WRITE_ONCE(fdb->dst, source);
518 fdb->key.vlan_id = vid;
519 fdb->flags = flags;
520 fdb->updated = fdb->used = jiffies;
521 if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
522 &fdb->rhnode,
523 br_fdb_rht_params)) {
524 kmem_cache_free(br_fdb_cache, fdb);
525 fdb = NULL;
526 } else {
527 hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
528 }
529 }
530 return fdb;
531 }
532
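/* Install a local+static entry for addr/vid, replacing a clashing
 * dynamic entry if one exists, and propagate the address to the
 * non-promiscuous ports' unicast filters. Caller must hold hash_lock.
 */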
533 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
534 const unsigned char *addr, u16 vid)
535 {
536 struct net_bridge_fdb_entry *fdb;
537
538 if (!is_valid_ether_addr(addr))
539 return -EINVAL;
540
541 fdb = br_fdb_find(br, addr, vid);
542 if (fdb) {
543 /* it is okay to have multiple ports with same
544 * address, just use the first one.
545 */
546 if (test_bit(BR_FDB_LOCAL, &fdb->flags))
547 return 0;
548 br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
549 source ? source->dev->name : br->dev->name, addr, vid);
550 fdb_delete(br, fdb, true);
551 }
552
553 fdb = fdb_create(br, source, addr, vid,
554 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
555 if (!fdb)
556 return -ENOMEM;
557
558 fdb_add_hw_addr(br, addr);
559 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
560 return 0;
561 }
562
563 /* Add entry for local address of interface */
564 int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
565 const unsigned char *addr, u16 vid)
566 {
567 int ret;
568
569 spin_lock_bh(&br->hash_lock);
570 ret = fdb_insert(br, source, addr, vid);
571 spin_unlock_bh(&br->hash_lock);
572 return ret;
573 }
574
575 /* returns true if the fdb was modified */
576 static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
577 {
578 return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
579 test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
580 }
581
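/* Learn or refresh the entry for a source address seen on @source.
 * Runs locklessly (under RCU) in the common case and only takes
 * hash_lock when a new entry has to be created.
 */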
582 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
583 const unsigned char *addr, u16 vid, unsigned long flags)
584 {
585 struct net_bridge_fdb_entry *fdb;
586
587 /* some users want to always flood. */
588 if (hold_time(br) == 0)
589 return;
590
591 fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
592 if (likely(fdb)) {
593 /* attempt to update an entry for a local interface */
594 if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
595 if (net_ratelimit())
596 br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
597 source->dev->name, addr, vid);
598 } else {
599 unsigned long now = jiffies;
600 bool fdb_modified = false;
601
602 if (now != fdb->updated) {
603 fdb->updated = now;
604 fdb_modified = __fdb_mark_active(fdb);
605 }
606
607 /* fastpath: update of existing entry */
608 if (unlikely(source != READ_ONCE(fdb->dst) &&
609 !test_bit(BR_FDB_STICKY, &fdb->flags))) {
610 br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
611 WRITE_ONCE(fdb->dst, source);
612 fdb_modified = true;
613 /* Take over HW learned entry */
614 if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
615 &fdb->flags)))
616 clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
617 &fdb->flags);
618 }
619
620 if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
621 set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
622 if (unlikely(fdb_modified)) {
623 trace_br_fdb_update(br, source, addr, vid, flags);
624 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
625 }
626 }
627 } else {
628 spin_lock(&br->hash_lock);
629 fdb = fdb_create(br, source, addr, vid, flags);
630 if (fdb) {
631 trace_br_fdb_update(br, source, addr, vid, flags);
632 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
633 }
634 /* else we lost the race and someone else inserted
635 * it first; don't bother updating
636 */
637 spin_unlock(&br->hash_lock);
638 }
639 }
640
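/* Map an entry's flags onto the netlink neighbour state (NUD_*)
 * reported to userspace.
 */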
641 static int fdb_to_nud(const struct net_bridge *br,
642 const struct net_bridge_fdb_entry *fdb)
643 {
644 if (test_bit(BR_FDB_LOCAL, &fdb->flags))
645 return NUD_PERMANENT;
646 else if (test_bit(BR_FDB_STATIC, &fdb->flags))
647 return NUD_NOARP;
648 else if (has_expired(br, fdb))
649 return NUD_STALE;
650 else
651 return NUD_REACHABLE;
652 }
653
654 static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
655 const struct net_bridge_fdb_entry *fdb,
656 u32 portid, u32 seq, int type, unsigned int flags)
657 {
658 const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
659 unsigned long now = jiffies;
660 struct nda_cacheinfo ci;
661 struct nlmsghdr *nlh;
662 struct ndmsg *ndm;
663
664 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
665 if (nlh == NULL)
666 return -EMSGSIZE;
667
668 ndm = nlmsg_data(nlh);
669 ndm->ndm_family = AF_BRIDGE;
670 ndm->ndm_pad1 = 0;
671 ndm->ndm_pad2 = 0;
672 ndm->ndm_flags = 0;
673 ndm->ndm_type = 0;
674 ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
675 ndm->ndm_state = fdb_to_nud(br, fdb);
676
677 if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
678 ndm->ndm_flags |= NTF_OFFLOADED;
679 if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
680 ndm->ndm_flags |= NTF_EXT_LEARNED;
681 if (test_bit(BR_FDB_STICKY, &fdb->flags))
682 ndm->ndm_flags |= NTF_STICKY;
683
684 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
685 goto nla_put_failure;
686 if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
687 goto nla_put_failure;
688 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
689 ci.ndm_confirmed = 0;
690 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
691 ci.ndm_refcnt = 0;
692 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
693 goto nla_put_failure;
694
695 if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
696 &fdb->key.vlan_id))
697 goto nla_put_failure;
698
699 if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
700 struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
701 u8 notify_bits = FDB_NOTIFY_BIT;
702
703 if (!nest)
704 goto nla_put_failure;
705 if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
706 notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
707
708 if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
709 nla_nest_cancel(skb, nest);
710 goto nla_put_failure;
711 }
712
713 nla_nest_end(skb, nest);
714 }
715
716 nlmsg_end(skb, nlh);
717 return 0;
718
719 nla_put_failure:
720 nlmsg_cancel(skb, nlh);
721 return -EMSGSIZE;
722 }
723
724 static inline size_t fdb_nlmsg_size(void)
725 {
726 return NLMSG_ALIGN(sizeof(struct ndmsg))
727 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
728 + nla_total_size(sizeof(u32)) /* NDA_MASTER */
729 + nla_total_size(sizeof(u16)) /* NDA_VLAN */
730 + nla_total_size(sizeof(struct nda_cacheinfo))
731 + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
732 + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
733 }
734
735 static int br_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
736 const struct net_bridge_fdb_entry *fdb,
737 unsigned long action, const void *ctx)
738 {
739 const struct net_bridge_port *p = READ_ONCE(fdb->dst);
740 struct switchdev_notifier_fdb_info item;
741 int err;
742
743 item.addr = fdb->key.addr.addr;
744 item.vid = fdb->key.vlan_id;
745 item.added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
746 item.offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
747 item.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
748 item.info.dev = (!p || item.is_local) ? br->dev : p->dev;
749 item.info.ctx = ctx;
750
751 err = nb->notifier_call(nb, action, &item);
752 return notifier_to_errno(err);
753 }
754
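/* Replay the current FDB towards @nb as SWITCHDEV_FDB_ADD_TO_DEVICE
 * or SWITCHDEV_FDB_DEL_TO_DEVICE notifications, typically used by
 * switchdev drivers when an offloaded port joins or leaves the
 * bridge.
 */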
755 int br_fdb_replay(const struct net_device *br_dev, const void *ctx, bool adding,
756 struct notifier_block *nb)
757 {
758 struct net_bridge_fdb_entry *fdb;
759 struct net_bridge *br;
760 unsigned long action;
761 int err = 0;
762
763 if (!nb)
764 return 0;
765
766 if (!netif_is_bridge_master(br_dev))
767 return -EINVAL;
768
769 br = netdev_priv(br_dev);
770
771 if (adding)
772 action = SWITCHDEV_FDB_ADD_TO_DEVICE;
773 else
774 action = SWITCHDEV_FDB_DEL_TO_DEVICE;
775
776 rcu_read_lock();
777
778 hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
779 err = br_fdb_replay_one(br, nb, fdb, action, ctx);
780 if (err)
781 break;
782 }
783
784 rcu_read_unlock();
785
786 return err;
787 }
788
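/* Send an RTM_NEWNEIGH/RTM_DELNEIGH notification to RTNLGRP_NEIGH
 * listeners and, if swdev_notify is set, forward the event to
 * switchdev as well.
 */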
789 static void fdb_notify(struct net_bridge *br,
790 const struct net_bridge_fdb_entry *fdb, int type,
791 bool swdev_notify)
792 {
793 struct net *net = dev_net(br->dev);
794 struct sk_buff *skb;
795 int err = -ENOBUFS;
796
797 if (swdev_notify)
798 br_switchdev_fdb_notify(br, fdb, type);
799
800 skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
801 if (skb == NULL)
802 goto errout;
803
804 err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
805 if (err < 0) {
806 /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
807 WARN_ON(err == -EMSGSIZE);
808 kfree_skb(skb);
809 goto errout;
810 }
811 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
812 return;
813 errout:
814 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
815 }
816
817 /* Dump information about entries, in response to GETNEIGH */
818 int br_fdb_dump(struct sk_buff *skb,
819 struct netlink_callback *cb,
820 struct net_device *dev,
821 struct net_device *filter_dev,
822 int *idx)
823 {
824 struct net_bridge *br = netdev_priv(dev);
825 struct net_bridge_fdb_entry *f;
826 int err = 0;
827
828 if (!(dev->priv_flags & IFF_EBRIDGE))
829 return err;
830
831 if (!filter_dev) {
832 err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
833 if (err < 0)
834 return err;
835 }
836
837 rcu_read_lock();
838 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
839 if (*idx < cb->args[2])
840 goto skip;
841 if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
842 if (filter_dev != dev)
843 goto skip;
844 /* !f->dst is a special case for the bridge itself:
845 * it means the MAC belongs to the bridge device, so a
846 * little more filtering is needed here -
847 * we only want to dump the !f->dst case
848 */
849 if (f->dst)
850 goto skip;
851 }
852 if (!filter_dev && f->dst)
853 goto skip;
854
855 err = fdb_fill_info(skb, br, f,
856 NETLINK_CB(cb->skb).portid,
857 cb->nlh->nlmsg_seq,
858 RTM_NEWNEIGH,
859 NLM_F_MULTI);
860 if (err < 0)
861 break;
862 skip:
863 *idx += 1;
864 }
865 rcu_read_unlock();
866
867 return err;
868 }
869
870 int br_fdb_get(struct sk_buff *skb,
871 struct nlattr *tb[],
872 struct net_device *dev,
873 const unsigned char *addr,
874 u16 vid, u32 portid, u32 seq,
875 struct netlink_ext_ack *extack)
876 {
877 struct net_bridge *br = netdev_priv(dev);
878 struct net_bridge_fdb_entry *f;
879 int err = 0;
880
881 rcu_read_lock();
882 f = br_fdb_find_rcu(br, addr, vid);
883 if (!f) {
884 NL_SET_ERR_MSG(extack, "Fdb entry not found");
885 err = -ENOENT;
886 goto errout;
887 }
888
889 err = fdb_fill_info(skb, br, f, portid, seq,
890 RTM_NEWNEIGH, 0);
891 errout:
892 rcu_read_unlock();
893 return err;
894 }
895
896 /* returns true if the fdb is modified */
897 static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
898 {
899 bool modified = false;
900
901 /* allow marking an entry as inactive; usually done on creation */
902 if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
903 !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
904 modified = true;
905
906 if ((notify & FDB_NOTIFY_BIT) &&
907 !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
908 /* enabled activity tracking */
909 modified = true;
910 } else if (!(notify & FDB_NOTIFY_BIT) &&
911 test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
912 /* disabled activity tracking, clear notify state */
913 clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
914 modified = true;
915 }
916
917 return modified;
918 }
919
920 /* Update (create or replace) forwarding database entry */
921 static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
922 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
923 struct nlattr *nfea_tb[])
924 {
925 bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
926 bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
927 struct net_bridge_fdb_entry *fdb;
928 u16 state = ndm->ndm_state;
929 bool modified = false;
930 u8 notify = 0;
931
932 /* If the port cannot learn, allow only local and static entries */
933 if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
934 !(source->state == BR_STATE_LEARNING ||
935 source->state == BR_STATE_FORWARDING))
936 return -EPERM;
937
938 if (!source && !(state & NUD_PERMANENT)) {
939 pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
940 br->dev->name);
941 return -EINVAL;
942 }
943
944 if (is_sticky && (state & NUD_PERMANENT))
945 return -EINVAL;
946
947 if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
948 notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
949 if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
950 (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
951 return -EINVAL;
952 }
953
954 fdb = br_fdb_find(br, addr, vid);
955 if (fdb == NULL) {
956 if (!(flags & NLM_F_CREATE))
957 return -ENOENT;
958
959 fdb = fdb_create(br, source, addr, vid, 0);
960 if (!fdb)
961 return -ENOMEM;
962
963 modified = true;
964 } else {
965 if (flags & NLM_F_EXCL)
966 return -EEXIST;
967
968 if (READ_ONCE(fdb->dst) != source) {
969 WRITE_ONCE(fdb->dst, source);
970 modified = true;
971 }
972 }
973
974 if (fdb_to_nud(br, fdb) != state) {
975 if (state & NUD_PERMANENT) {
976 set_bit(BR_FDB_LOCAL, &fdb->flags);
977 if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
978 fdb_add_hw_addr(br, addr);
979 } else if (state & NUD_NOARP) {
980 clear_bit(BR_FDB_LOCAL, &fdb->flags);
981 if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
982 fdb_add_hw_addr(br, addr);
983 } else {
984 clear_bit(BR_FDB_LOCAL, &fdb->flags);
985 if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
986 fdb_del_hw_addr(br, addr);
987 }
988
989 modified = true;
990 }
991
992 if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
993 change_bit(BR_FDB_STICKY, &fdb->flags);
994 modified = true;
995 }
996
997 if (fdb_handle_notify(fdb, notify))
998 modified = true;
999
1000 set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1001
1002 fdb->used = jiffies;
1003 if (modified) {
1004 if (refresh)
1005 fdb->updated = jiffies;
1006 fdb_notify(br, fdb, RTM_NEWNEIGH, true);
1007 }
1008
1009 return 0;
1010 }
1011
1012 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
1013 struct net_bridge_port *p, const unsigned char *addr,
1014 u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
1015 struct netlink_ext_ack *extack)
1016 {
1017 int err = 0;
1018
1019 if (ndm->ndm_flags & NTF_USE) {
1020 if (!p) {
1021 pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
1022 br->dev->name);
1023 return -EINVAL;
1024 }
1025 if (!nbp_state_should_learn(p))
1026 return 0;
1027
1028 local_bh_disable();
1029 rcu_read_lock();
1030 br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
1031 rcu_read_unlock();
1032 local_bh_enable();
1033 } else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
1034 if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
1035 NL_SET_ERR_MSG_MOD(extack,
1036 "FDB entry towards bridge must be permanent");
1037 return -EINVAL;
1038 }
1039 err = br_fdb_external_learn_add(br, p, addr, vid, true);
1040 } else {
1041 spin_lock_bh(&br->hash_lock);
1042 err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
1043 spin_unlock_bh(&br->hash_lock);
1044 }
1045
1046 return err;
1047 }
1048
1049 static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
1050 [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 },
1051 [NFEA_DONT_REFRESH] = { .type = NLA_FLAG },
1052 };
1053
1054 /* Add new permanent fdb entry with RTM_NEWNEIGH */
1055 int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
1056 struct net_device *dev,
1057 const unsigned char *addr, u16 vid, u16 nlh_flags,
1058 struct netlink_ext_ack *extack)
1059 {
1060 struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
1061 struct net_bridge_vlan_group *vg;
1062 struct net_bridge_port *p = NULL;
1063 struct net_bridge_vlan *v;
1064 struct net_bridge *br = NULL;
1065 int err = 0;
1066
1067 trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
1068
1069 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
1070 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
1071 return -EINVAL;
1072 }
1073
1074 if (is_zero_ether_addr(addr)) {
1075 pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
1076 return -EINVAL;
1077 }
1078
1079 if (dev->priv_flags & IFF_EBRIDGE) {
1080 br = netdev_priv(dev);
1081 vg = br_vlan_group(br);
1082 } else {
1083 p = br_port_get_rtnl(dev);
1084 if (!p) {
1085 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
1086 dev->name);
1087 return -EINVAL;
1088 }
1089 br = p->br;
1090 vg = nbp_vlan_group(p);
1091 }
1092
1093 if (tb[NDA_FDB_EXT_ATTRS]) {
1094 attr = tb[NDA_FDB_EXT_ATTRS];
1095 err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
1096 br_nda_fdb_pol, extack);
1097 if (err)
1098 return err;
1099 } else {
1100 memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
1101 }
1102
1103 if (vid) {
1104 v = br_vlan_find(vg, vid);
1105 if (!v || !br_vlan_should_use(v)) {
1106 pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
1107 return -EINVAL;
1108 }
1109
1110 /* VID was specified, so use it. */
1111 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
1112 extack);
1113 } else {
1114 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
1115 extack);
1116 if (err || !vg || !vg->num_vlans)
1117 goto out;
1118
1119 /* We have vlans configured on this port and user didn't
1120 * specify a VLAN. To be nice, add/update entry for every
1121 * vlan on this port.
1122 */
1123 list_for_each_entry(v, &vg->vlan_list, vlist) {
1124 if (!br_vlan_should_use(v))
1125 continue;
1126 err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
1127 nfea_tb, extack);
1128 if (err)
1129 goto out;
1130 }
1131 }
1132
1133 out:
1134 return err;
1135 }
1136
1137 static int fdb_delete_by_addr_and_port(struct net_bridge *br,
1138 const struct net_bridge_port *p,
1139 const u8 *addr, u16 vlan)
1140 {
1141 struct net_bridge_fdb_entry *fdb;
1142
1143 fdb = br_fdb_find(br, addr, vlan);
1144 if (!fdb || READ_ONCE(fdb->dst) != p)
1145 return -ENOENT;
1146
1147 fdb_delete(br, fdb, true);
1148
1149 return 0;
1150 }
1151
1152 static int __br_fdb_delete(struct net_bridge *br,
1153 const struct net_bridge_port *p,
1154 const unsigned char *addr, u16 vid)
1155 {
1156 int err;
1157
1158 spin_lock_bh(&br->hash_lock);
1159 err = fdb_delete_by_addr_and_port(br, p, addr, vid);
1160 spin_unlock_bh(&br->hash_lock);
1161
1162 return err;
1163 }
1164
1165 /* Remove neighbor entry with RTM_DELNEIGH */
1166 int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
1167 struct net_device *dev,
1168 const unsigned char *addr, u16 vid)
1169 {
1170 struct net_bridge_vlan_group *vg;
1171 struct net_bridge_port *p = NULL;
1172 struct net_bridge_vlan *v;
1173 struct net_bridge *br;
1174 int err;
1175
1176 if (dev->priv_flags & IFF_EBRIDGE) {
1177 br = netdev_priv(dev);
1178 vg = br_vlan_group(br);
1179 } else {
1180 p = br_port_get_rtnl(dev);
1181 if (!p) {
1182 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
1183 dev->name);
1184 return -EINVAL;
1185 }
1186 vg = nbp_vlan_group(p);
1187 br = p->br;
1188 }
1189
1190 if (vid) {
1191 v = br_vlan_find(vg, vid);
1192 if (!v) {
1193 pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
1194 return -EINVAL;
1195 }
1196
1197 err = __br_fdb_delete(br, p, addr, vid);
1198 } else {
1199 err = -ENOENT;
1200 err &= __br_fdb_delete(br, p, addr, 0);
1201 if (!vg || !vg->num_vlans)
1202 return err;
1203
1204 list_for_each_entry(v, &vg->vlan_list, vlist) {
1205 if (!br_vlan_should_use(v))
1206 continue;
1207 err &= __br_fdb_delete(br, p, addr, v->vid);
1208 }
1209 }
1210
1211 return err;
1212 }
1213
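/* Add all static FDB addresses to the port's unicast address list,
 * rolling the additions back if any dev_uc_add() call fails.
 * Called under RTNL, e.g. when the port stops being promiscuous.
 */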
1214 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
1215 {
1216 struct net_bridge_fdb_entry *f, *tmp;
1217 int err = 0;
1218
1219 ASSERT_RTNL();
1220
1221 /* the key here is that static entries change only under rtnl */
1222 rcu_read_lock();
1223 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1224 /* We only care for static entries */
1225 if (!test_bit(BR_FDB_STATIC, &f->flags))
1226 continue;
1227 err = dev_uc_add(p->dev, f->key.addr.addr);
1228 if (err)
1229 goto rollback;
1230 }
1231 done:
1232 rcu_read_unlock();
1233
1234 return err;
1235
1236 rollback:
1237 hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
1238 /* We only care for static entries */
1239 if (!test_bit(BR_FDB_STATIC, &tmp->flags))
1240 continue;
1241 if (tmp == f)
1242 break;
1243 dev_uc_del(p->dev, tmp->key.addr.addr);
1244 }
1245
1246 goto done;
1247 }
1248
1249 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
1250 {
1251 struct net_bridge_fdb_entry *f;
1252
1253 ASSERT_RTNL();
1254
1255 rcu_read_lock();
1256 hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
1257 /* We only care for static entries */
1258 if (!test_bit(BR_FDB_STATIC, &f->flags))
1259 continue;
1260
1261 dev_uc_del(p->dev, f->key.addr.addr);
1262 }
1263 rcu_read_unlock();
1264 }
1265
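/* Create or refresh an entry learned outside the bridge (e.g. by a
 * switchdev driver's hardware), marking it BR_FDB_ADDED_BY_EXT_LEARN
 * and notifying userspace about any change.
 */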
1266 int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
1267 const unsigned char *addr, u16 vid,
1268 bool swdev_notify)
1269 {
1270 struct net_bridge_fdb_entry *fdb;
1271 bool modified = false;
1272 int err = 0;
1273
1274 trace_br_fdb_external_learn_add(br, p, addr, vid);
1275
1276 spin_lock_bh(&br->hash_lock);
1277
1278 fdb = br_fdb_find(br, addr, vid);
1279 if (!fdb) {
1280 unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);
1281
1282 if (swdev_notify)
1283 flags |= BIT(BR_FDB_ADDED_BY_USER);
1284
1285 if (!p)
1286 flags |= BIT(BR_FDB_LOCAL);
1287
1288 fdb = fdb_create(br, p, addr, vid, flags);
1289 if (!fdb) {
1290 err = -ENOMEM;
1291 goto err_unlock;
1292 }
1293 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1294 } else {
1295 fdb->updated = jiffies;
1296
1297 if (READ_ONCE(fdb->dst) != p) {
1298 WRITE_ONCE(fdb->dst, p);
1299 modified = true;
1300 }
1301
1302 if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
1303 /* Refresh entry */
1304 fdb->used = jiffies;
1305 } else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
1306 /* Take over SW learned entry */
1307 set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
1308 modified = true;
1309 }
1310
1311 if (swdev_notify)
1312 set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
1313
1314 if (!p)
1315 set_bit(BR_FDB_LOCAL, &fdb->flags);
1316
1317 if (modified)
1318 fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
1319 }
1320
1321 err_unlock:
1322 spin_unlock_bh(&br->hash_lock);
1323
1324 return err;
1325 }
1326
1327 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
1328 const unsigned char *addr, u16 vid,
1329 bool swdev_notify)
1330 {
1331 struct net_bridge_fdb_entry *fdb;
1332 int err = 0;
1333
1334 spin_lock_bh(&br->hash_lock);
1335
1336 fdb = br_fdb_find(br, addr, vid);
1337 if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
1338 fdb_delete(br, fdb, swdev_notify);
1339 else
1340 err = -ENOENT;
1341
1342 spin_unlock_bh(&br->hash_lock);
1343
1344 return err;
1345 }
1346
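/* Update the BR_FDB_OFFLOADED flag on an entry to reflect whether it
 * is offloaded to hardware, as reported by the caller (e.g. a
 * switchdev driver).
 */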
1347 void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
1348 const unsigned char *addr, u16 vid, bool offloaded)
1349 {
1350 struct net_bridge_fdb_entry *fdb;
1351
1352 spin_lock_bh(&br->hash_lock);
1353
1354 fdb = br_fdb_find(br, addr, vid);
1355 if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
1356 change_bit(BR_FDB_OFFLOADED, &fdb->flags);
1357
1358 spin_unlock_bh(&br->hash_lock);
1359 }
1360
1361 void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
1362 {
1363 struct net_bridge_fdb_entry *f;
1364 struct net_bridge_port *p;
1365
1366 ASSERT_RTNL();
1367
1368 p = br_port_get_rtnl(dev);
1369 if (!p)
1370 return;
1371
1372 spin_lock_bh(&p->br->hash_lock);
1373 hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
1374 if (f->dst == p && f->key.vlan_id == vid)
1375 clear_bit(BR_FDB_OFFLOADED, &f->flags);
1376 }
1377 spin_unlock_bh(&p->br->hash_lock);
1378 }
1379 EXPORT_SYMBOL_GPL(br_fdb_clear_offload);