/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

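/* Resolve the FID a {port, VID} pair is mapped to: a vPort uses the FID it
 * is bound to, otherwise the FID equals the VID, and VID 0 falls back to
 * the port's PVID.
 */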
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

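/* switchdev passes the netdev the operation was originally targeted at in
 * orig_dev. If that is a VLAN device, translate the physical port into the
 * vPort representing the {port, VID} pair.
 */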
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	u16 vid;

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

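/* Program the port's spanning tree state into the device via the SPMS
 * register. A vPort only updates its own VID; a bridge port applies the
 * state to every active VLAN.
 */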
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

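/* Update the unicast (UC) and broadcast (BM) flood tables for a range of
 * FID (vPort) or FID-offset (bridge port) indexes. If the second write
 * fails, the first one is rolled back so the tables stay consistent.
 */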
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool uc_set,
				     bool bm_set)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, uc_set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
			    table_type, range, local_port, bm_set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto err_flood_bm_set;

	goto buffer_out;

err_flood_bm_set:
	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
			    table_type, range, local_port, !uc_set);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
						 set, true);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	u16 vfid;

	/* In case of vFIDs, the index into the flooding table is relative
	 * to the start of the vFID range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);
	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}

static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	u16 vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
							set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						       set);
		if (err)
			goto err_port_vid_learning_set;
	}

	return 0;

err_port_vid_learning_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
						 !mlxsw_sp_port->uc_flood);
		if (err)
			return err;
	}

	if ((learning ^ brport_flags) & BR_LEARNING) {
		err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
						 !mlxsw_sp_port->learning);
		if (err)
			goto err_port_learning_set;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;

err_port_learning_set:
	if ((uc_flood ^ brport_flags) & BR_FLOOD)
		mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
					   mlxsw_sp_port->uc_flood);
	return err;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

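/* Convert the bridge ageing time from clock_t to seconds and program it.
 * Range checking happens in the prepare phase, the device write in the
 * commit phase.
 */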
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if (!vlan_enabled && mlxsw_sp->master_bridge.dev == orig_dev) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

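/* Create a FID in the device, install the global VID-to-FID mapping for it
 * and track it in the driver's FID list. Returns the new FID or ERR_PTR()
 * on failure, unwinding in reverse order.
 */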
struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports that are members of the FID might be
	 * using a {Port, VID} to FID mapping, we create a global
	 * VID-to-FID mapping. This allows a port to transition to VLAN
	 * mode, knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->r)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	if (test_bit(fid, mlxsw_sp_port->active_vlans))
		return 0;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If the port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

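/* Join a range of FIDs: take a reference on each FID (creating it if
 * needed), enable flooding for the range, and install the per-port
 * {Port, VID} to FID mappings where required.
 */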
static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					mlxsw_sp_port->uc_flood, true);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);
err_port_flood_set:
	fid = fid_end;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

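/* Set the port's PVID. VID 0 means "no PVID": untagged traffic is
 * disallowed instead of being assigned to a VLAN.
 */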
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid_begin, u16 vid_end,
					  bool learn_enable)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
						       vid_e, learn_enable);
		if (err)
			return err;
	}

	return 0;
}

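/* Add a range of VLANs to a bridged port: join the matching FIDs, set VLAN
 * membership and untagged state, update the PVID if requested, program
 * learning, and finally re-apply the STP state to the new VLANs. Each step
 * is unwound on failure.
 */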
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
					     mlxsw_sp_port->learning);
	if (err) {
		netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
			   vid_begin, vid_end);
		goto err_port_vid_learning_set;
	}

	/* Change the activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);
err_port_vid_learning_set:
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

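/* Add a static unicast FDB entry in the commit phase. vPorts key the entry
 * by FID and record their VID as lag_vid; LAG ports use the LAG-specific
 * SFD record format.
 */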
static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

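/* Add or remove the port in a switch multicast ID (SMID) group. When the
 * group is first created, explicitly clear all other ports from it.
 */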
static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 fid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 fid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

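/* Add the port to a multicast group: look up (or allocate) the MID for the
 * {MAC, FID} pair, add the port to its SMID and, for a new group, install
 * the multicast SFD record.
 */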
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	u16 vid, pvid;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);

	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Change the activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

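/* Dump the FDB entries that belong to this port (or its LAG) by iterating
 * over SFD query sessions. Even after a callback error, the loop keeps
 * reading so the firmware dump session runs to completion; the first error
 * is stored and returned.
 */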
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

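/* Handle a learned / aged-out MAC notification for a single port: resolve
 * the FID to a vPort or VLAN, mirror the entry into the device's FDB and,
 * if learning sync is enabled, notify the bridge via switchdev. Entries
 * that cannot be resolved are simply removed without notification.
 */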
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

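/* Delayed work that polls the device for FDB notifications (SFN register),
 * processes each record under RTNL and re-arms itself at the configured
 * learning interval.
 */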
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}