]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
mtd: nand: atmel: Relax tADL_min constraint
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_switchdev.c
1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <linux/rtnetlink.h>
49 #include <net/switchdev.h>
50
51 #include "spectrum.h"
52 #include "core.h"
53 #include "reg.h"
54
struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge state: all bridge devices offloaded to this switch,
 * multicast group (MID) bookkeeping and the FDB notification poll work.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;	/* back-pointer to owning switch */
	struct {
		struct delayed_work dw;	/* periodic FDB notification poll */
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;		/* FDB ageing time, in seconds */
	/* Hardware supports a single VLAN-aware bridge; set while one is
	 * offloaded so a second enslavement can be rejected.
	 */
	bool vlan_enabled_exists;
	struct list_head bridges_list;	/* of mlxsw_sp_bridge_device.list */
	struct list_head mids_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;	/* VLAN-aware */
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;	/* VLAN-unaware */
};
75
/* One offloaded Linux bridge device and the switch ports enslaved to it. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* the bridge net_device */
	struct list_head list;		/* node in mlxsw_sp_bridge.bridges_list */
	struct list_head ports_list;	/* of mlxsw_sp_bridge_port.list */
	u8 vlan_enabled:1,		/* bridge is VLAN-aware (802.1Q) */
	   multicast_enabled:1;		/* IGMP/MLD snooping enabled */
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D ops */
};
84
/* A bridge port: either a physical switch port or a LAG enslaved to an
 * offloaded bridge. Reference-counted per enslaved port VLAN.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* the enslaved netdev */
	struct mlxsw_sp_bridge_device *bridge_device;	/* owning bridge */
	struct list_head list;		/* node in bridge_device->ports_list */
	struct list_head vlans_list;	/* of mlxsw_sp_bridge_vlan.list */
	unsigned int ref_count;
	u8 stp_state;			/* BR_STATE_* */
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;			/* port is a multicast router port */
	bool lagged;			/* selects member of union below */
	union {
		u16 lag_id;		/* when lagged */
		u16 system_port;	/* when a plain local port */
	};
};
100
/* A VLAN configured on a bridge port, linking the port VLANs of all
 * member switch ports (relevant for LAG).
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port->vlans_list */
	struct list_head port_vlan_list; /* of mlxsw_sp_port_vlan nodes */
	u16 vid;
};
106
/* Bridge-type specific operations (802.1Q vs. 802.1D bridges). */
struct mlxsw_sp_bridge_ops {
	/* Called when @mlxsw_sp_port is enslaved to the bridge. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port);
	/* Called when @mlxsw_sp_port leaves the bridge. */
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Resolve the FID used by the bridge for @vid. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid);
};
118
/* Forward declaration; used by mlxsw_sp_port_vlan_bridge_leave() below. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);
123
124 static struct mlxsw_sp_bridge_device *
125 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
126 const struct net_device *br_dev)
127 {
128 struct mlxsw_sp_bridge_device *bridge_device;
129
130 list_for_each_entry(bridge_device, &bridge->bridges_list, list)
131 if (bridge_device->dev == br_dev)
132 return bridge_device;
133
134 return NULL;
135 }
136
137 static struct mlxsw_sp_bridge_device *
138 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
139 struct net_device *br_dev)
140 {
141 struct device *dev = bridge->mlxsw_sp->bus_info->dev;
142 struct mlxsw_sp_bridge_device *bridge_device;
143 bool vlan_enabled = br_vlan_enabled(br_dev);
144
145 if (vlan_enabled && bridge->vlan_enabled_exists) {
146 dev_err(dev, "Only one VLAN-aware bridge is supported\n");
147 return ERR_PTR(-EINVAL);
148 }
149
150 bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
151 if (!bridge_device)
152 return ERR_PTR(-ENOMEM);
153
154 bridge_device->dev = br_dev;
155 bridge_device->vlan_enabled = vlan_enabled;
156 bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
157 INIT_LIST_HEAD(&bridge_device->ports_list);
158 if (vlan_enabled) {
159 bridge->vlan_enabled_exists = true;
160 bridge_device->ops = bridge->bridge_8021q_ops;
161 } else {
162 bridge_device->ops = bridge->bridge_8021d_ops;
163 }
164 list_add(&bridge_device->list, &bridge->bridges_list);
165
166 return bridge_device;
167 }
168
169 static void
170 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
171 struct mlxsw_sp_bridge_device *bridge_device)
172 {
173 list_del(&bridge_device->list);
174 if (bridge_device->vlan_enabled)
175 bridge->vlan_enabled_exists = false;
176 WARN_ON(!list_empty(&bridge_device->ports_list));
177 kfree(bridge_device);
178 }
179
/* Look up the record for @br_dev, creating it on first use. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	return bridge_device ? bridge_device :
	       mlxsw_sp_bridge_device_create(bridge, br_dev);
}
192
193 static void
194 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
195 struct mlxsw_sp_bridge_device *bridge_device)
196 {
197 if (list_empty(&bridge_device->ports_list))
198 mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
199 }
200
201 static struct mlxsw_sp_bridge_port *
202 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
203 const struct net_device *brport_dev)
204 {
205 struct mlxsw_sp_bridge_port *bridge_port;
206
207 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
208 if (bridge_port->dev == brport_dev)
209 return bridge_port;
210 }
211
212 return NULL;
213 }
214
215 static struct mlxsw_sp_bridge_port *
216 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
217 struct net_device *brport_dev)
218 {
219 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
220 struct mlxsw_sp_bridge_device *bridge_device;
221
222 if (!br_dev)
223 return NULL;
224
225 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
226 if (!bridge_device)
227 return NULL;
228
229 return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
230 }
231
232 static struct mlxsw_sp_bridge_port *
233 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
234 struct net_device *brport_dev)
235 {
236 struct mlxsw_sp_bridge_port *bridge_port;
237 struct mlxsw_sp_port *mlxsw_sp_port;
238
239 bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
240 if (!bridge_port)
241 return NULL;
242
243 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
244 bridge_port->lagged = mlxsw_sp_port->lagged;
245 if (bridge_port->lagged)
246 bridge_port->lag_id = mlxsw_sp_port->lag_id;
247 else
248 bridge_port->system_port = mlxsw_sp_port->local_port;
249 bridge_port->dev = brport_dev;
250 bridge_port->bridge_device = bridge_device;
251 bridge_port->stp_state = BR_STATE_DISABLED;
252 bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC;
253 INIT_LIST_HEAD(&bridge_port->vlans_list);
254 list_add(&bridge_port->list, &bridge_device->ports_list);
255 bridge_port->ref_count = 1;
256
257 return bridge_port;
258 }
259
260 static void
261 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
262 {
263 list_del(&bridge_port->list);
264 WARN_ON(!list_empty(&bridge_port->vlans_list));
265 kfree(bridge_port);
266 }
267
268 static bool
269 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
270 bridge_port)
271 {
272 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
273
274 /* In case ports were pulled from out of a bridged LAG, then
275 * it's possible the reference count isn't zero, yet the bridge
276 * port should be destroyed, as it's no longer an upper of ours.
277 */
278 if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
279 return true;
280 else if (bridge_port->ref_count == 0)
281 return true;
282 else
283 return false;
284 }
285
286 static struct mlxsw_sp_bridge_port *
287 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
288 struct net_device *brport_dev)
289 {
290 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
291 struct mlxsw_sp_bridge_device *bridge_device;
292 struct mlxsw_sp_bridge_port *bridge_port;
293 int err;
294
295 bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
296 if (bridge_port) {
297 bridge_port->ref_count++;
298 return bridge_port;
299 }
300
301 bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
302 if (IS_ERR(bridge_device))
303 return ERR_CAST(bridge_device);
304
305 bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
306 if (!bridge_port) {
307 err = -ENOMEM;
308 goto err_bridge_port_create;
309 }
310
311 return bridge_port;
312
313 err_bridge_port_create:
314 mlxsw_sp_bridge_device_put(bridge, bridge_device);
315 return ERR_PTR(err);
316 }
317
318 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
319 struct mlxsw_sp_bridge_port *bridge_port)
320 {
321 struct mlxsw_sp_bridge_device *bridge_device;
322
323 bridge_port->ref_count--;
324 if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
325 return;
326 bridge_device = bridge_port->bridge_device;
327 mlxsw_sp_bridge_port_destroy(bridge_port);
328 mlxsw_sp_bridge_device_put(bridge, bridge_device);
329 }
330
331 static struct mlxsw_sp_port_vlan *
332 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
333 const struct mlxsw_sp_bridge_device *
334 bridge_device,
335 u16 vid)
336 {
337 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
338
339 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
340 list) {
341 if (!mlxsw_sp_port_vlan->bridge_port)
342 continue;
343 if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
344 bridge_device)
345 continue;
346 if (bridge_device->vlan_enabled &&
347 mlxsw_sp_port_vlan->vid != vid)
348 continue;
349 return mlxsw_sp_port_vlan;
350 }
351
352 return NULL;
353 }
354
355 static struct mlxsw_sp_port_vlan*
356 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
357 u16 fid_index)
358 {
359 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
360
361 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
362 list) {
363 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
364
365 if (fid && mlxsw_sp_fid_index(fid) == fid_index)
366 return mlxsw_sp_port_vlan;
367 }
368
369 return NULL;
370 }
371
372 static struct mlxsw_sp_bridge_vlan *
373 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
374 u16 vid)
375 {
376 struct mlxsw_sp_bridge_vlan *bridge_vlan;
377
378 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
379 if (bridge_vlan->vid == vid)
380 return bridge_vlan;
381 }
382
383 return NULL;
384 }
385
386 static struct mlxsw_sp_bridge_vlan *
387 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
388 {
389 struct mlxsw_sp_bridge_vlan *bridge_vlan;
390
391 bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
392 if (!bridge_vlan)
393 return NULL;
394
395 INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
396 bridge_vlan->vid = vid;
397 list_add(&bridge_vlan->list, &bridge_port->vlans_list);
398
399 return bridge_vlan;
400 }
401
402 static void
403 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
404 {
405 list_del(&bridge_vlan->list);
406 WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
407 kfree(bridge_vlan);
408 }
409
410 static struct mlxsw_sp_bridge_vlan *
411 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
412 {
413 struct mlxsw_sp_bridge_vlan *bridge_vlan;
414
415 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
416 if (bridge_vlan)
417 return bridge_vlan;
418
419 return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
420 }
421
422 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
423 {
424 if (list_empty(&bridge_vlan->port_vlan_list))
425 mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
426 }
427
428 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
429 struct net_device *dev,
430 unsigned long *brport_flags)
431 {
432 struct mlxsw_sp_bridge_port *bridge_port;
433
434 bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
435 if (WARN_ON(!bridge_port))
436 return;
437
438 memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
439 }
440
441 static int mlxsw_sp_port_attr_get(struct net_device *dev,
442 struct switchdev_attr *attr)
443 {
444 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
445 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
446
447 switch (attr->id) {
448 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
449 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
450 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
451 attr->u.ppid.id_len);
452 break;
453 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
454 mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
455 &attr->u.brport_flags);
456 break;
457 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
458 attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD;
459 break;
460 default:
461 return -EOPNOTSUPP;
462 }
463
464 return 0;
465 }
466
467 static int
468 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
469 struct mlxsw_sp_bridge_vlan *bridge_vlan,
470 u8 state)
471 {
472 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
473
474 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
475 bridge_vlan_node) {
476 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
477 continue;
478 return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
479 bridge_vlan->vid, state);
480 }
481
482 return 0;
483 }
484
/* SWITCHDEV_ATTR_ID_PORT_STP_STATE handler: program @state on every
 * VLAN of the bridge port in the commit phase. On failure, VLANs
 * already updated are rolled back to the previously committed state.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	/* Commit the new state only after hardware was fully updated. */
	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the old state on VLANs updated before the failure. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
523
524 static int
525 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
526 struct mlxsw_sp_bridge_vlan *bridge_vlan,
527 enum mlxsw_sp_flood_type packet_type,
528 bool member)
529 {
530 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
531
532 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
533 bridge_vlan_node) {
534 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
535 continue;
536 return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
537 packet_type,
538 mlxsw_sp_port->local_port,
539 member);
540 }
541
542 return 0;
543 }
544
/* Set @member-ship of the port in the @packet_type flood table for
 * every VLAN of @bridge_port. On failure, the VLANs already updated
 * are reverted to the opposite setting.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Roll back by applying the inverse setting to the VLANs that
	 * were already updated before the failure.
	 */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
572
573 static int
574 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
575 struct mlxsw_sp_bridge_vlan *bridge_vlan,
576 bool set)
577 {
578 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
579 u16 vid = bridge_vlan->vid;
580
581 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
582 bridge_vlan_node) {
583 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
584 continue;
585 return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
586 }
587
588 return 0;
589 }
590
/* Enable/disable learning on every VLAN of @bridge_port. On failure,
 * VLANs already updated are reverted to the opposite setting.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Undo the VLANs updated before the failure. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
615
616 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
617 struct switchdev_trans *trans,
618 struct net_device *orig_dev,
619 unsigned long brport_flags)
620 {
621 struct mlxsw_sp_bridge_port *bridge_port;
622 int err;
623
624 if (switchdev_trans_ph_prepare(trans))
625 return 0;
626
627 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
628 orig_dev);
629 if (WARN_ON(!bridge_port))
630 return -EINVAL;
631
632 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
633 MLXSW_SP_FLOOD_TYPE_UC,
634 brport_flags & BR_FLOOD);
635 if (err)
636 return err;
637
638 err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
639 brport_flags & BR_LEARNING);
640 if (err)
641 return err;
642
643 memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
644
645 return 0;
646 }
647
648 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
649 {
650 char sfdat_pl[MLXSW_REG_SFDAT_LEN];
651 int err;
652
653 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
654 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
655 if (err)
656 return err;
657 mlxsw_sp->bridge->ageing_time = ageing_time;
658 return 0;
659 }
660
661 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
662 struct switchdev_trans *trans,
663 unsigned long ageing_clock_t)
664 {
665 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
666 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
667 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
668
669 if (switchdev_trans_ph_prepare(trans)) {
670 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
671 ageing_time > MLXSW_SP_MAX_AGEING_TIME)
672 return -ERANGE;
673 else
674 return 0;
675 }
676
677 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
678 }
679
680 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
681 struct switchdev_trans *trans,
682 struct net_device *orig_dev,
683 bool vlan_enabled)
684 {
685 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
686 struct mlxsw_sp_bridge_device *bridge_device;
687
688 if (!switchdev_trans_ph_prepare(trans))
689 return 0;
690
691 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
692 if (WARN_ON(!bridge_device))
693 return -EINVAL;
694
695 if (bridge_device->vlan_enabled == vlan_enabled)
696 return 0;
697
698 netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
699 return -EINVAL;
700 }
701
702 static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
703 struct switchdev_trans *trans,
704 struct net_device *orig_dev,
705 bool is_port_mc_router)
706 {
707 struct mlxsw_sp_bridge_port *bridge_port;
708
709 if (switchdev_trans_ph_prepare(trans))
710 return 0;
711
712 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
713 orig_dev);
714 if (WARN_ON(!bridge_port))
715 return -EINVAL;
716
717 if (!bridge_port->bridge_device->multicast_enabled)
718 return 0;
719
720 return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
721 MLXSW_SP_FLOOD_TYPE_MC,
722 is_port_mc_router);
723 }
724
/* SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED handler: when snooping is
 * disabled, multicast is flooded to all bridge ports; when enabled,
 * only to ports marked as multicast routers.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		/* With snooping off every port floods MC; with snooping
		 * on, only mrouter ports do.
		 */
		bool member = mc_disabled ? true : bridge_port->mrouter;

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}
760
761 static int mlxsw_sp_port_attr_set(struct net_device *dev,
762 const struct switchdev_attr *attr,
763 struct switchdev_trans *trans)
764 {
765 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
766 int err;
767
768 switch (attr->id) {
769 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
770 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
771 attr->orig_dev,
772 attr->u.stp_state);
773 break;
774 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
775 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
776 attr->orig_dev,
777 attr->u.brport_flags);
778 break;
779 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
780 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
781 attr->u.ageing_time);
782 break;
783 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
784 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
785 attr->orig_dev,
786 attr->u.vlan_filtering);
787 break;
788 case SWITCHDEV_ATTR_ID_PORT_MROUTER:
789 err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
790 attr->orig_dev,
791 attr->u.mrouter);
792 break;
793 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
794 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
795 attr->orig_dev,
796 attr->u.mc_disabled);
797 break;
798 default:
799 err = -EOPNOTSUPP;
800 break;
801 }
802
803 return err;
804 }
805
806 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
807 {
808 const struct mlxsw_sp_bridge_device *bridge_device;
809
810 bridge_device = bridge_port->bridge_device;
811 return !bridge_device->multicast_enabled ? true : bridge_port->mrouter;
812 }
813
/* Attach the port VLAN to the FID used by @bridge_port's bridge:
 * resolve (take a reference on) the FID, add the port to the UC/MC/BC
 * flood tables and map the {port, VID} pair to the FID. The error path
 * unwinds in exact reverse order.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unicast flooding follows the port's BR_FLOOD flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* Multicast flooding depends on snooping and mrouter state. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
863
/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap the {port, VID} pair
 * from the FID, remove the port from the BC/MC/UC flood tables and
 * release the FID reference.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
879
880 static u16
881 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
882 u16 vid, bool is_pvid)
883 {
884 if (is_pvid)
885 return vid;
886 else if (mlxsw_sp_port->pvid == vid)
887 return 0; /* Dis-allow untagged packets */
888 else
889 return mlxsw_sp_port->pvid;
890 }
891
/* Make the port VLAN a member of @bridge_port: join the bridge FID,
 * program learning and STP state for the VID, link into the bridge
 * VLAN entry and take a bridge port reference. Idempotent when the
 * port VLAN is already bridged.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold the bridge port for as long as the port VLAN is bridged;
	 * released in mlxsw_sp_port_vlan_bridge_leave().
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
942
/* Reverse of mlxsw_sp_port_vlan_bridge_join(): detach the port VLAN
 * from its bridge port, disable learning/STP for the VID, flush the
 * FDB for the FID if this was the last member, leave the FID and drop
 * the bridge port reference.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last;

	/* Only port VLANs bridged via a .1Q or .1D FID can leave. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	/* Check before unlinking whether we are the last member. */
	last = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
974
/* Add VLAN @vid to @bridge_port on @mlxsw_sp_port: create (or reuse)
 * the port VLAN, program VLAN membership and tagging, update the PVID
 * and join the bridge. Errors unwind in reverse order.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;	/* for rollback */
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
	if (IS_ERR(mlxsw_sp_port_vlan))
		return PTR_ERR(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
	return err;
}
1012
/* SWITCHDEV_OBJ_ID_PORT_VLAN add handler: add the range
 * [vid_begin, vid_end] to the bridge port in the commit phase.
 * VLANs on a VLAN-unaware bridge are not offloaded.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid);
		if (err)
			return err;
	}

	return 0;
}
1046
1047 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1048 {
1049 return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1050 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1051 }
1052
/* Flush all FDB entries learned on @bridge_port for FID @fid_index by
 * writing the SFDF register. The port field carries either the LAG ID or
 * the system port, matching the flush type chosen for the port.
 */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
1069
1070 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1071 {
1072 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1073 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1074 }
1075
1076 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1077 {
1078 return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1079 MLXSW_REG_SFD_OP_WRITE_REMOVE;
1080 }
1081
/* Program (or remove) a unicast FDB record for @mac/@fid pointing at
 * @local_port via the SFD register. @action selects what the device does
 * on a match (e.g. forward normally or to the router); @dynamic selects
 * the ageing policy. The SFD payload is heap-allocated (MLXSW_REG_SFD_LEN).
 * Returns 0 or a negative errno.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}
1102
/* Convenience wrapper: unicast FDB add/remove with the default (NOP)
 * forwarding action.
 */
static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}
1110
/* Install (or remove) a static FDB entry that traps @mac/@fid to the IP
 * router (local port 0). Used for router interface MAC addresses.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}
1118
/* Program (or remove) a unicast FDB record pointing at LAG @lag_id. The
 * @lag_vid disambiguates entries when the LAG is a member of a VLAN-unaware
 * bridge. Mirrors __mlxsw_sp_port_fdb_uc_op() but packs a LAG record.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}
1139
/* Resolve a switchdev FDB notification to the port's {bridge port, FID} and
 * program the entry in hardware as a static record. Returns 0 when the port
 * is not a member of the notified VLAN (nothing to offload), -EINVAL when
 * the originating device is not a known bridge port.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	/* LAG members are programmed via the LAG record variant */
	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
1176
/* Program (or remove) a multicast FDB record binding @addr/@fid to the
 * multicast group index @mid via the SFD register.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}
1194
/* Add or remove the port from multicast group @mid via the SMID register.
 * When @clear_all_ports is set (first reference to the group), the port
 * mask bit is set for every existing port so their membership is reset to
 * a known state in the same write.
 */
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1216
1217 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
1218 const unsigned char *addr,
1219 u16 fid)
1220 {
1221 struct mlxsw_sp_mid *mid;
1222
1223 list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) {
1224 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1225 return mid;
1226 }
1227 return NULL;
1228 }
1229
/* Allocate a new multicast group entry for {@addr, @fid}: claim a free MID
 * index from the bitmap, initialize the entry with a zero reference count
 * and link it into the bridge's MID list. Returns NULL when no MID index
 * is free or on allocation failure.
 */
static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 fid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->mid = mid_idx;
	/* Caller increments the reference count when it takes a reference */
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list);

	return mid;
}
1255
1256 static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
1257 struct mlxsw_sp_mid *mid)
1258 {
1259 if (--mid->ref_count == 0) {
1260 list_del(&mid->list);
1261 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1262 kfree(mid);
1263 return 1;
1264 }
1265 return 0;
1266 }
1267
/* switchdev SWITCHDEV_OBJ_ID_PORT_MDB add handler (commit phase only).
 * Looks up or creates the {MAC, FID} multicast group, adds the port to it
 * via SMID and, on first reference, installs the multicast FDB record.
 * On error the reference taken here is dropped, destroying the group if it
 * was newly created.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	/* First reference: also clear all other ports' membership bits */
	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
					   mid->mid, true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}
1330
1331 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1332 const struct switchdev_obj *obj,
1333 struct switchdev_trans *trans)
1334 {
1335 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1336 int err = 0;
1337
1338 switch (obj->id) {
1339 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1340 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
1341 SWITCHDEV_OBJ_PORT_VLAN(obj),
1342 trans);
1343 break;
1344 case SWITCHDEV_OBJ_ID_PORT_MDB:
1345 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1346 SWITCHDEV_OBJ_PORT_MDB(obj),
1347 trans);
1348 break;
1349 default:
1350 err = -EOPNOTSUPP;
1351 break;
1352 }
1353
1354 return err;
1355 }
1356
/* Remove VLAN @vid from @bridge_port on @mlxsw_sp_port: leave the bridge,
 * clear the PVID if @vid was the PVID, disable the VLAN on the port and
 * drop the port-VLAN reference. Reverse of mlxsw_sp_bridge_port_vlan_add().
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* If the deleted VLAN is the current PVID, reset the PVID to 0 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
}
1373
/* switchdev SWITCHDEV_OBJ_ID_PORT_VLAN del handler. Removes every VLAN in
 * [vid_begin, vid_end] from the port. A no-op for VLAN-unaware bridges.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}
1394
/* switchdev SWITCHDEV_OBJ_ID_PORT_MDB del handler. Removes the port from
 * the {MAC, FID} multicast group in SMID, drops the group reference and,
 * if it was the last reference, removes the multicast FDB record as well.
 * Intermediate hardware errors are logged but do not abort the teardown.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	u16 mid_idx;
	int err = 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	/* Save the index: __mlxsw_sp_mc_dec_ref() may free @mid */
	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index,
					   mid_idx, false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}
1442
1443 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1444 const struct switchdev_obj *obj)
1445 {
1446 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1447 int err = 0;
1448
1449 switch (obj->id) {
1450 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1451 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1452 SWITCHDEV_OBJ_PORT_VLAN(obj));
1453 break;
1454 case SWITCHDEV_OBJ_ID_PORT_MDB:
1455 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1456 SWITCHDEV_OBJ_PORT_MDB(obj));
1457 break;
1458 default:
1459 err = -EOPNOTSUPP;
1460 break;
1461 }
1462
1463 return err;
1464 }
1465
1466 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1467 u16 lag_id)
1468 {
1469 struct mlxsw_sp_port *mlxsw_sp_port;
1470 u64 max_lag_members;
1471 int i;
1472
1473 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1474 MAX_LAG_MEMBERS);
1475 for (i = 0; i < max_lag_members; i++) {
1476 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1477 if (mlxsw_sp_port)
1478 return mlxsw_sp_port;
1479 }
1480 return NULL;
1481 }
1482
/* switchdev callbacks installed on every port netdev */
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
};
1489
/* Join a port to a VLAN-aware (802.1Q) bridge. Only physical (non-VLAN)
 * uppers may join; the default port-VLAN (VID 1) reference is released
 * since the bridge will manage the port's VLANs from now on.
 */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	if (is_vlan_dev(bridge_port->dev))
		return -EINVAL;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Let VLAN-aware bridge take care of its own VLANs */
	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);

	return 0;
}
1509
/* Undo mlxsw_sp_bridge_8021q_port_join(): re-take the default port-VLAN
 * (VID 1) reference and restore it as PVID.
 */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
}
1519
1520 static struct mlxsw_sp_fid *
1521 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
1522 u16 vid)
1523 {
1524 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1525
1526 return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
1527 }
1528
/* Operations for VLAN-aware (802.1Q) bridges */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
};
1534
1535 static bool
1536 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
1537 const struct net_device *br_dev)
1538 {
1539 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1540
1541 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
1542 list) {
1543 if (mlxsw_sp_port_vlan->bridge_port &&
1544 mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
1545 br_dev)
1546 return true;
1547 }
1548
1549 return false;
1550 }
1551
/* Join a port's VLAN upper to a VLAN-unaware (802.1D) bridge. Only VLAN
 * devices may join, a given {port, bridge} pair may only be bridged once,
 * and the port-VLAN must first leave any router interface it belongs to.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid;

	if (!is_vlan_dev(bridge_port->dev))
		return -EINVAL;
	vid = vlan_dev_vlan_id(bridge_port->dev);

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
}
1579
/* Undo mlxsw_sp_bridge_8021d_port_join(): make the port-VLAN backing the
 * bridged VLAN upper leave the bridge.
 */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = vlan_dev_vlan_id(bridge_port->dev);

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
1594
1595 static struct mlxsw_sp_fid *
1596 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
1597 u16 vid)
1598 {
1599 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1600
1601 return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
1602 }
1603
/* Operations for VLAN-unaware (802.1D) bridges */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
};
1609
/* Enslave @brport_dev (the port or one of its uppers) under bridge
 * @br_dev. Takes a reference on the bridge port and delegates the actual
 * join to the bridge-type-specific (802.1Q/802.1D) operations; the
 * reference is dropped if the join fails.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
1635
/* Reverse of mlxsw_sp_port_bridge_join(): run the bridge-type-specific
 * leave and drop the bridge port reference. Silently returns if the bridge
 * or bridge port is not tracked.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
1655
1656 static void
1657 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
1658 const char *mac, u16 vid,
1659 struct net_device *dev)
1660 {
1661 struct switchdev_notifier_fdb_info info;
1662
1663 info.addr = mac;
1664 info.vid = vid;
1665 call_switchdev_notifiers(type, dev, &info.info);
1666 }
1667
/* Process one learned/aged-out MAC record from an SFN notification.
 * Normally re-programs the entry in hardware (learned entries must be made
 * known to the device, aged-out ones removed) and notifies the bridge via
 * SWITCHDEV_FDB_{ADD,DEL}_TO_BRIDGE. If the record cannot be resolved to a
 * known {port, FID, bridge}, it is removed from hardware without notifying
 * the bridge (the just_remove path).
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unresolvable entry: delete from hardware, skip the notification */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
1725
/* LAG counterpart of mlxsw_sp_fdb_notify_mac_process(): process one
 * learned/aged-out MAC-over-LAG record from an SFN notification, using a
 * LAG representor port for the lookups and the LAG FDB op for hardware
 * programming. Unresolvable records are deleted without notification.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);

	return;

just_remove:
	/* Unresolvable entry: delete from hardware, skip the notification */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
1785
/* Dispatch a single SFN record to the matching handler based on its type
 * (learned vs. aged-out, physical port vs. LAG). Unknown record types are
 * silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}
1808
1809 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
1810 {
1811 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
1812
1813 mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
1814 msecs_to_jiffies(bridge->fdb_notify.interval));
1815 }
1816
/* Delayed work: query the SFN register for pending FDB notification
 * records, process each one under RTNL, then re-schedule itself. The SFN
 * payload is heap-allocated (MLXSW_REG_SFN_LEN).
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	/* RTNL serializes against other switchdev/bridge operations */
	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
1849
/* Deferred context for a switchdev FDB notifier event; the notifier runs
 * in atomic context, so the actual processing is punted to a work item.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	/* Copy of the notifier payload; fdb_info.addr is owned (kzalloc'd)
	 * by this structure and freed by the work function.
	 */
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;	/* held via dev_hold() until work completes */
	unsigned long event;	/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE */
};
1856
/* Work function for deferred switchdev FDB events: programs the FDB entry
 * in hardware under RTNL and, for successful additions, reports the entry
 * as offloaded. Frees the copied MAC address and the work item, and drops
 * the device reference taken by the notifier.
 */
static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Best effort: deletion failures are not reported */
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
1893
/* Called under rcu_read_lock() */
/* switchdev notifier callback. Runs in atomic context, so it only copies
 * the FDB event payload (including a private copy of the MAC address),
 * takes a device reference, and queues a work item for the real handling.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;

	/* Ignore events for devices not backed by one of our ports */
	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's addr pointer is not valid after we return,
		 * so keep a private copy; freed by the work function.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
1942
/* Registered with the switchdev notifier chain in mlxsw_sp_fdb_init() */
static struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
1946
/* Initialize FDB handling: set the default ageing time, register the
 * switchdev FDB notifier and kick off the periodic FDB notification poll.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}
1969
1970 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
1971 {
1972 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
1973 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
1974
1975 }
1976
1977 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1978 {
1979 struct mlxsw_sp_bridge *bridge;
1980
1981 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
1982 if (!bridge)
1983 return -ENOMEM;
1984 mlxsw_sp->bridge = bridge;
1985 bridge->mlxsw_sp = mlxsw_sp;
1986
1987 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
1988 INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list);
1989
1990 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
1991 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
1992
1993 return mlxsw_sp_fdb_init(mlxsw_sp);
1994 }
1995
/* Tear down FDB handling and free the per-ASIC bridge state. Both the MID
 * and bridge lists are expected to be empty by now.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->mids_list));
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
2003
/* Attach the switchdev callbacks to the port's netdev. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
2008
/* Counterpart of mlxsw_sp_port_switchdev_init(); nothing to undo here. */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}