1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/kernel.h>
38 #include <linux/types.h>
39 #include <linux/netdevice.h>
40 #include <linux/etherdevice.h>
41 #include <linux/slab.h>
42 #include <linux/device.h>
43 #include <linux/skbuff.h>
44 #include <linux/if_vlan.h>
45 #include <linux/if_bridge.h>
46 #include <linux/workqueue.h>
47 #include <linux/jiffies.h>
48 #include <linux/rtnetlink.h>
49 #include <net/switchdev.h>
50
51 #include "spectrum.h"
52 #include "core.h"
53 #include "reg.h"
54
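/* Resolve the FID to use for an FDB/MDB operation on this port: the vPort's
 * FID when one is assigned, otherwise the VID itself, falling back to the
 * port's PVID when no VID was supplied.
 */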
55 static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
56 u16 vid)
57 {
58 struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
59 u16 fid = vid;
60
61 fid = f ? f->fid : fid;
62
63 if (!fid)
64 fid = mlxsw_sp_port->pvid;
65
66 return fid;
67 }
68
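/* switchdev hands us the device the operation originated on; when that is a
 * VLAN device on top of this port, operate on the matching vPort instead of
 * the physical port.
 */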
69 static struct mlxsw_sp_port *
70 mlxsw_sp_port_orig_get(struct net_device *dev,
71 struct mlxsw_sp_port *mlxsw_sp_port)
72 {
73 struct mlxsw_sp_port *mlxsw_sp_vport;
74 u16 vid;
75
76 if (!is_vlan_dev(dev))
77 return mlxsw_sp_port;
78
79 vid = vlan_dev_vlan_id(dev);
80 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
81 WARN_ON(!mlxsw_sp_vport);
82
83 return mlxsw_sp_vport;
84 }
85
86 static int mlxsw_sp_port_attr_get(struct net_device *dev,
87 struct switchdev_attr *attr)
88 {
89 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
90 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
91
92 mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
93 if (!mlxsw_sp_port)
94 return -EINVAL;
95
96 switch (attr->id) {
97 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
98 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
99 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
100 attr->u.ppid.id_len);
101 break;
102 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
103 attr->u.brport_flags =
104 (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
105 (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
106 (mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
107 break;
108 default:
109 return -EOPNOTSUPP;
110 }
111
112 return 0;
113 }
114
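/* Write the bridge port's STP state to the SPMS register: a vPort only has
 * its own VLAN updated, a physical port has all of its active VLANs updated.
 */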
115 static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
116 u8 state)
117 {
118 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
119 enum mlxsw_reg_spms_state spms_state;
120 char *spms_pl;
121 u16 vid;
122 int err;
123
124 switch (state) {
125 case BR_STATE_FORWARDING:
126 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
127 break;
128 case BR_STATE_LEARNING:
129 spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
130 break;
131 case BR_STATE_LISTENING: /* fall-through */
132 case BR_STATE_DISABLED: /* fall-through */
133 case BR_STATE_BLOCKING:
134 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
135 break;
136 default:
137 BUG();
138 }
139
140 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
141 if (!spms_pl)
142 return -ENOMEM;
143 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
144
145 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
146 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
147 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
148 } else {
149 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
150 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
151 }
152
153 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
154 kfree(spms_pl);
155 return err;
156 }
157
158 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
159 struct switchdev_trans *trans,
160 u8 state)
161 {
162 if (switchdev_trans_ph_prepare(trans))
163 return 0;
164
165 mlxsw_sp_port->stp_state = state;
166 return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
167 }
168
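/* Add or remove the port in the unicast flooding table and, unless only_uc is
 * set, also in the broadcast/multicast (BM) table, for a range of FID (vPort)
 * or FID-offset indexes. If the second write fails, the first is rolled back.
 */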
169 static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
170 u16 idx_begin, u16 idx_end, bool set,
171 bool only_uc)
172 {
173 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
174 u16 local_port = mlxsw_sp_port->local_port;
175 enum mlxsw_flood_table_type table_type;
176 u16 range = idx_end - idx_begin + 1;
177 char *sftr_pl;
178 int err;
179
180 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
181 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
182 else
183 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
184
185 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
186 if (!sftr_pl)
187 return -ENOMEM;
188
189 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
190 table_type, range, local_port, set);
191 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
192 if (err)
193 goto buffer_out;
194
195 /* Flooding control allows one to decide whether a given port will
196 * flood unicast traffic for which there is no FDB entry.
197 */
198 if (only_uc)
199 goto buffer_out;
200
201 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
202 table_type, range, local_port, set);
203 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
204 if (err)
205 goto err_flood_bm_set;
206 else
207 goto buffer_out;
208
209 err_flood_bm_set:
210 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
211 table_type, range, local_port, !set);
212 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
213 buffer_out:
214 kfree(sftr_pl);
215 return err;
216 }
217
218 static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
219 bool set)
220 {
221 struct net_device *dev = mlxsw_sp_port->dev;
222 u16 vid, last_visited_vid;
223 int err;
224
225 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
226 u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
227 u16 vfid = mlxsw_sp_fid_to_vfid(fid);
228
229 return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
230 set, true);
231 }
232
233 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
234 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
235 true);
236 if (err) {
237 last_visited_vid = vid;
238 goto err_port_flood_set;
239 }
240 }
241
242 return 0;
243
244 err_port_flood_set:
245 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
246 __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
247 netdev_err(dev, "Failed to configure unicast flooding\n");
248 return err;
249 }
250
251 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
252 bool set)
253 {
254 u16 vfid;
255
256 /* In case of vFIDs, index into the flooding table is relative to
257 * the start of the vFIDs range.
258 */
259 vfid = mlxsw_sp_fid_to_vfid(fid);
260 return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
261 false);
262 }
263
264 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
265 struct switchdev_trans *trans,
266 unsigned long brport_flags)
267 {
268 unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
269 bool set;
270 int err;
271
272 if (!mlxsw_sp_port->bridged)
273 return -EINVAL;
274
275 if (switchdev_trans_ph_prepare(trans))
276 return 0;
277
278 if ((uc_flood ^ brport_flags) & BR_FLOOD) {
279 set = mlxsw_sp_port->uc_flood ? false : true;
280 err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
281 if (err)
282 return err;
283 }
284
285 mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
286 mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
287 mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
288
289 return 0;
290 }
291
292 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
293 {
294 char sfdat_pl[MLXSW_REG_SFDAT_LEN];
295 int err;
296
297 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
298 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
299 if (err)
300 return err;
301 mlxsw_sp->ageing_time = ageing_time;
302 return 0;
303 }
304
305 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
306 struct switchdev_trans *trans,
307 unsigned long ageing_clock_t)
308 {
309 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
310 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
311 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
312
313 if (switchdev_trans_ph_prepare(trans)) {
314 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
315 ageing_time > MLXSW_SP_MAX_AGEING_TIME)
316 return -ERANGE;
317 else
318 return 0;
319 }
320
321 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
322 }
323
324 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
325 struct switchdev_trans *trans,
326 struct net_device *orig_dev,
327 bool vlan_enabled)
328 {
329 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
330
331 /* SWITCHDEV_TRANS_PREPARE phase */
332 if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
333 netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
334 return -EINVAL;
335 }
336
337 return 0;
338 }
339
340 static int mlxsw_sp_port_attr_set(struct net_device *dev,
341 const struct switchdev_attr *attr,
342 struct switchdev_trans *trans)
343 {
344 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
345 int err = 0;
346
347 mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
348 if (!mlxsw_sp_port)
349 return -EINVAL;
350
351 switch (attr->id) {
352 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
353 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
354 attr->u.stp_state);
355 break;
356 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
357 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
358 attr->u.brport_flags);
359 break;
360 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
361 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
362 attr->u.ageing_time);
363 break;
364 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
365 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
366 attr->orig_dev,
367 attr->u.vlan_filtering);
368 break;
369 default:
370 err = -EOPNOTSUPP;
371 break;
372 }
373
374 return err;
375 }
376
377 static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
378 {
379 char sfmr_pl[MLXSW_REG_SFMR_LEN];
380
381 mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
382 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
383 }
384
385 static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
386 {
387 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
388 char svfa_pl[MLXSW_REG_SVFA_LEN];
389
390 mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
391 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
392 }
393
394 static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
395 {
396 struct mlxsw_sp_fid *f;
397
398 f = kzalloc(sizeof(*f), GFP_KERNEL);
399 if (!f)
400 return NULL;
401
402 f->fid = fid;
403
404 return f;
405 }
406
407 struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
408 {
409 struct mlxsw_sp_fid *f;
410 int err;
411
412 err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
413 if (err)
414 return ERR_PTR(err);
415
416 	/* Although all the ports that are members of the FID might be
417 	 * using a {Port, VID} to FID mapping, we create a global VID-to-FID
418 	 * mapping. This allows a port to transition to VLAN mode,
419 * knowing the global mapping exists.
420 */
421 err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
422 if (err)
423 goto err_fid_map;
424
425 f = mlxsw_sp_fid_alloc(fid);
426 if (!f) {
427 err = -ENOMEM;
428 goto err_allocate_fid;
429 }
430
431 list_add(&f->list, &mlxsw_sp->fids);
432
433 return f;
434
435 err_allocate_fid:
436 mlxsw_sp_fid_map(mlxsw_sp, fid, false);
437 err_fid_map:
438 mlxsw_sp_fid_op(mlxsw_sp, fid, false);
439 return ERR_PTR(err);
440 }
441
442 void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
443 {
444 u16 fid = f->fid;
445
446 list_del(&f->list);
447
448 if (f->r)
449 mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
450
451 kfree(f);
452
453 mlxsw_sp_fid_op(mlxsw_sp, fid, false);
454 }
455
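/* Take a reference on the FID, creating it on first use (mlxsw_sp_fid_create
 * also installs the global VID-to-FID mapping).
 */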
456 static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
457 u16 fid)
458 {
459 struct mlxsw_sp_fid *f;
460
461 f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
462 if (!f) {
463 f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
464 if (IS_ERR(f))
465 return PTR_ERR(f);
466 }
467
468 f->ref_count++;
469
470 netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
471
472 return 0;
473 }
474
475 static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
476 u16 fid)
477 {
478 struct mlxsw_sp_fid *f;
479
480 f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
481 if (WARN_ON(!f))
482 return;
483
484 netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
485
486 mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
487
488 if (--f->ref_count == 0)
489 mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
490 }
491
492 static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
493 bool valid)
494 {
495 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
496
497 	/* If the port doesn't have any vPorts, then it can use the global
498 * VID-to-FID mapping.
499 */
500 if (list_empty(&mlxsw_sp_port->vports_list))
501 return 0;
502
503 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
504 }
505
506 static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
507 u16 fid_begin, u16 fid_end)
508 {
509 int fid, err;
510
511 for (fid = fid_begin; fid <= fid_end; fid++) {
512 err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
513 if (err)
514 goto err_port_fid_join;
515 }
516
517 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
518 true, false);
519 if (err)
520 goto err_port_flood_set;
521
522 for (fid = fid_begin; fid <= fid_end; fid++) {
523 err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
524 if (err)
525 goto err_port_fid_map;
526 }
527
528 return 0;
529
530 err_port_fid_map:
531 for (fid--; fid >= fid_begin; fid--)
532 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
533 __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
534 false);
535 err_port_flood_set:
536 	fid = fid_end + 1; /* every FID in the range was joined */
537 err_port_fid_join:
538 for (fid--; fid >= fid_begin; fid--)
539 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
540 return err;
541 }
542
543 static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
544 u16 fid_begin, u16 fid_end)
545 {
546 int fid;
547
548 for (fid = fid_begin; fid <= fid_end; fid++)
549 mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
550
551 __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
552 false);
553
554 for (fid = fid_begin; fid <= fid_end; fid++)
555 __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
556 }
557
558 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
559 u16 vid)
560 {
561 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
562 char spvid_pl[MLXSW_REG_SPVID_LEN];
563
564 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
565 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
566 }
567
568 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
569 bool allow)
570 {
571 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
572 char spaft_pl[MLXSW_REG_SPAFT_LEN];
573
574 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
575 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
576 }
577
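/* Set the port's PVID. A VID of zero means untagged traffic is no longer
 * admitted on the port instead of being mapped to a PVID.
 */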
578 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
579 {
580 struct net_device *dev = mlxsw_sp_port->dev;
581 int err;
582
583 if (!vid) {
584 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
585 if (err) {
586 netdev_err(dev, "Failed to disallow untagged traffic\n");
587 return err;
588 }
589 } else {
590 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
591 if (err) {
592 netdev_err(dev, "Failed to set PVID\n");
593 return err;
594 }
595
596 		/* Allow untagged traffic only if it was not already allowed. */
597 if (!mlxsw_sp_port->pvid) {
598 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
599 true);
600 if (err) {
601 netdev_err(dev, "Failed to allow untagged traffic\n");
602 goto err_port_allow_untagged_set;
603 }
604 }
605 }
606
607 mlxsw_sp_port->pvid = vid;
608 return 0;
609
610 err_port_allow_untagged_set:
611 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
612 return err;
613 }
614
615 static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
616 u16 vid_begin, u16 vid_end, bool is_member,
617 bool untagged)
618 {
619 u16 vid, vid_e;
620 int err;
621
622 for (vid = vid_begin; vid <= vid_end;
623 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
624 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
625 vid_end);
626
627 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
628 is_member, untagged);
629 if (err)
630 return err;
631 }
632
633 return 0;
634 }
635
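/* Add a range of VLANs to a bridged port: join the corresponding FIDs,
 * program VLAN membership and untagged state, adjust the PVID if requested,
 * mark the VLANs as active and finally re-apply the port's STP state to
 * them, rolling everything back on failure.
 */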
636 static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
637 u16 vid_begin, u16 vid_end,
638 bool flag_untagged, bool flag_pvid)
639 {
640 struct net_device *dev = mlxsw_sp_port->dev;
641 u16 vid, old_pvid;
642 int err;
643
644 if (!mlxsw_sp_port->bridged)
645 return -EINVAL;
646
647 err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
648 if (err) {
649 netdev_err(dev, "Failed to join FIDs\n");
650 return err;
651 }
652
653 err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
654 true, flag_untagged);
655 if (err) {
656 netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
657 vid_end);
658 goto err_port_vlans_set;
659 }
660
661 old_pvid = mlxsw_sp_port->pvid;
662 if (flag_pvid && old_pvid != vid_begin) {
663 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
664 if (err) {
665 netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
666 goto err_port_pvid_set;
667 }
668 } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
669 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
670 if (err) {
671 netdev_err(dev, "Unable to del PVID\n");
672 goto err_port_pvid_set;
673 }
674 }
675
676 	/* Change activity bits only if the HW operation succeeded */
677 for (vid = vid_begin; vid <= vid_end; vid++) {
678 set_bit(vid, mlxsw_sp_port->active_vlans);
679 if (flag_untagged)
680 set_bit(vid, mlxsw_sp_port->untagged_vlans);
681 else
682 clear_bit(vid, mlxsw_sp_port->untagged_vlans);
683 }
684
685 /* STP state change must be done after we set active VLANs */
686 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
687 mlxsw_sp_port->stp_state);
688 if (err) {
689 netdev_err(dev, "Failed to set STP state\n");
690 goto err_port_stp_state_set;
691 }
692
693 return 0;
694
695 err_port_stp_state_set:
696 for (vid = vid_begin; vid <= vid_end; vid++)
697 clear_bit(vid, mlxsw_sp_port->active_vlans);
698 if (old_pvid != mlxsw_sp_port->pvid)
699 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
700 err_port_pvid_set:
701 __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
702 false);
703 err_port_vlans_set:
704 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
705 return err;
706 }
707
708 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
709 const struct switchdev_obj_port_vlan *vlan,
710 struct switchdev_trans *trans)
711 {
712 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
713 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
714
715 if (switchdev_trans_ph_prepare(trans))
716 return 0;
717
718 return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
719 vlan->vid_begin, vlan->vid_end,
720 flag_untagged, flag_pvid);
721 }
722
723 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
724 {
725 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
726 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
727 }
728
729 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
730 {
731 return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
732 MLXSW_REG_SFD_OP_WRITE_REMOVE;
733 }
734
735 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
736 const char *mac, u16 fid, bool adding,
737 enum mlxsw_reg_sfd_rec_action action,
738 bool dynamic)
739 {
740 char *sfd_pl;
741 int err;
742
743 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
744 if (!sfd_pl)
745 return -ENOMEM;
746
747 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
748 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
749 mac, fid, action, local_port);
750 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
751 kfree(sfd_pl);
752
753 return err;
754 }
755
756 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
757 const char *mac, u16 fid, bool adding,
758 bool dynamic)
759 {
760 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
761 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
762 }
763
764 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
765 bool adding)
766 {
767 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
768 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
769 false);
770 }
771
772 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
773 const char *mac, u16 fid, u16 lag_vid,
774 bool adding, bool dynamic)
775 {
776 char *sfd_pl;
777 int err;
778
779 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
780 if (!sfd_pl)
781 return -ENOMEM;
782
783 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
784 mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
785 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
786 lag_vid, lag_id);
787 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
788 kfree(sfd_pl);
789
790 return err;
791 }
792
793 static int
794 mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
795 const struct switchdev_obj_port_fdb *fdb,
796 struct switchdev_trans *trans)
797 {
798 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
799 u16 lag_vid = 0;
800
801 if (switchdev_trans_ph_prepare(trans))
802 return 0;
803
804 	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
805 		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
807
808 if (!mlxsw_sp_port->lagged)
809 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
810 mlxsw_sp_port->local_port,
811 fdb->addr, fid, true, false);
812 else
813 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
814 mlxsw_sp_port->lag_id,
815 fdb->addr, fid, lag_vid,
816 true, false);
817 }
818
819 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
820 u16 fid, u16 mid, bool adding)
821 {
822 char *sfd_pl;
823 int err;
824
825 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
826 if (!sfd_pl)
827 return -ENOMEM;
828
829 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
830 mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
831 MLXSW_REG_SFD_REC_ACTION_NOP, mid);
832 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
833 kfree(sfd_pl);
834 return err;
835 }
836
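/* Add or remove the port in a multicast group's (MID) port list through the
 * SMID register. On the first reference to the group, clear_all_ports extends
 * the write to every existing port so that stale members are cleared and the
 * new group contains only this port.
 */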
837 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
838 bool add, bool clear_all_ports)
839 {
840 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
841 char *smid_pl;
842 int err, i;
843
844 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
845 if (!smid_pl)
846 return -ENOMEM;
847
848 mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
849 if (clear_all_ports) {
850 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
851 if (mlxsw_sp->ports[i])
852 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
853 }
854 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
855 kfree(smid_pl);
856 return err;
857 }
858
859 static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
860 const unsigned char *addr,
861 u16 vid)
862 {
863 struct mlxsw_sp_mid *mid;
864
865 list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
866 if (ether_addr_equal(mid->addr, addr) && mid->vid == vid)
867 return mid;
868 }
869 return NULL;
870 }
871
872 static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
873 const unsigned char *addr,
874 u16 vid)
875 {
876 struct mlxsw_sp_mid *mid;
877 u16 mid_idx;
878
879 mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
880 MLXSW_SP_MID_MAX);
881 if (mid_idx == MLXSW_SP_MID_MAX)
882 return NULL;
883
884 mid = kzalloc(sizeof(*mid), GFP_KERNEL);
885 if (!mid)
886 return NULL;
887
888 set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
889 ether_addr_copy(mid->addr, addr);
890 mid->vid = vid;
891 mid->mid = mid_idx;
892 mid->ref_count = 0;
893 list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);
894
895 return mid;
896 }
897
898 static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
899 struct mlxsw_sp_mid *mid)
900 {
901 if (--mid->ref_count == 0) {
902 list_del(&mid->list);
903 clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
904 kfree(mid);
905 return 1;
906 }
907 return 0;
908 }
909
910 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
911 const struct switchdev_obj_port_mdb *mdb,
912 struct switchdev_trans *trans)
913 {
914 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
915 struct net_device *dev = mlxsw_sp_port->dev;
916 struct mlxsw_sp_mid *mid;
917 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
918 int err = 0;
919
920 if (switchdev_trans_ph_prepare(trans))
921 return 0;
922
923 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
924 if (!mid) {
925 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid);
926 if (!mid) {
927 netdev_err(dev, "Unable to allocate MC group\n");
928 return -ENOMEM;
929 }
930 }
931 mid->ref_count++;
932
933 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
934 mid->ref_count == 1);
935 if (err) {
936 netdev_err(dev, "Unable to set SMID\n");
937 goto err_out;
938 }
939
940 if (mid->ref_count == 1) {
941 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
942 true);
943 if (err) {
944 netdev_err(dev, "Unable to set MC SFD\n");
945 goto err_out;
946 }
947 }
948
949 return 0;
950
951 err_out:
952 __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
953 return err;
954 }
955
956 static int mlxsw_sp_port_obj_add(struct net_device *dev,
957 const struct switchdev_obj *obj,
958 struct switchdev_trans *trans)
959 {
960 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
961 int err = 0;
962
963 mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
964 if (!mlxsw_sp_port)
965 return -EINVAL;
966
967 switch (obj->id) {
968 case SWITCHDEV_OBJ_ID_PORT_VLAN:
969 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
970 return 0;
971
972 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
973 SWITCHDEV_OBJ_PORT_VLAN(obj),
974 trans);
975 break;
976 case SWITCHDEV_OBJ_ID_IPV4_FIB:
977 err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
978 SWITCHDEV_OBJ_IPV4_FIB(obj),
979 trans);
980 break;
981 case SWITCHDEV_OBJ_ID_PORT_FDB:
982 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
983 SWITCHDEV_OBJ_PORT_FDB(obj),
984 trans);
985 break;
986 case SWITCHDEV_OBJ_ID_PORT_MDB:
987 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
988 SWITCHDEV_OBJ_PORT_MDB(obj),
989 trans);
990 break;
991 default:
992 err = -EOPNOTSUPP;
993 break;
994 }
995
996 return err;
997 }
998
999 static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1000 u16 vid_begin, u16 vid_end)
1001 {
1002 struct net_device *dev = mlxsw_sp_port->dev;
1003 u16 vid, pvid;
1004 int err;
1005
1006 if (!mlxsw_sp_port->bridged)
1007 return -EINVAL;
1008
1009 err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
1010 false, false);
1011 if (err) {
1012 netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
1013 vid_end);
1014 return err;
1015 }
1016
1017 pvid = mlxsw_sp_port->pvid;
1018 if (pvid >= vid_begin && pvid <= vid_end) {
1019 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
1020 if (err) {
1021 netdev_err(dev, "Unable to del PVID %d\n", pvid);
1022 return err;
1023 }
1024 }
1025
1026 mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
1027
1028 	/* Change activity bits only if the HW operation succeeded */
1029 for (vid = vid_begin; vid <= vid_end; vid++)
1030 clear_bit(vid, mlxsw_sp_port->active_vlans);
1031
1032 return 0;
1033 }
1034
1035 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1036 const struct switchdev_obj_port_vlan *vlan)
1037 {
1038 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
1039 vlan->vid_end);
1040 }
1041
1042 void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
1043 {
1044 u16 vid;
1045
1046 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
1047 __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
1048 }
1049
1050 static int
1051 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
1052 const struct switchdev_obj_port_fdb *fdb)
1053 {
1054 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
1055 u16 lag_vid = 0;
1056
1057 	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
1058 		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1060
1061 if (!mlxsw_sp_port->lagged)
1062 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
1063 mlxsw_sp_port->local_port,
1064 fdb->addr, fid,
1065 false, false);
1066 else
1067 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
1068 mlxsw_sp_port->lag_id,
1069 fdb->addr, fid, lag_vid,
1070 false, false);
1071 }
1072
1073 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1074 const struct switchdev_obj_port_mdb *mdb)
1075 {
1076 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1077 struct net_device *dev = mlxsw_sp_port->dev;
1078 struct mlxsw_sp_mid *mid;
1079 u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
1080 u16 mid_idx;
1081 int err = 0;
1082
1083 mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid);
1084 if (!mid) {
1085 netdev_err(dev, "Unable to remove port from MC DB\n");
1086 return -EINVAL;
1087 }
1088
1089 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
1090 if (err)
1091 netdev_err(dev, "Unable to remove port from SMID\n");
1092
1093 mid_idx = mid->mid;
1094 if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
1095 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
1096 false);
1097 if (err)
1098 netdev_err(dev, "Unable to remove MC SFD\n");
1099 }
1100
1101 return err;
1102 }
1103
1104 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1105 const struct switchdev_obj *obj)
1106 {
1107 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1108 int err = 0;
1109
1110 mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1111 if (!mlxsw_sp_port)
1112 return -EINVAL;
1113
1114 switch (obj->id) {
1115 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1116 if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
1117 return 0;
1118
1119 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1120 SWITCHDEV_OBJ_PORT_VLAN(obj));
1121 break;
1122 case SWITCHDEV_OBJ_ID_IPV4_FIB:
1123 err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
1124 SWITCHDEV_OBJ_IPV4_FIB(obj));
1125 break;
1126 case SWITCHDEV_OBJ_ID_PORT_FDB:
1127 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
1128 SWITCHDEV_OBJ_PORT_FDB(obj));
1129 break;
1130 case SWITCHDEV_OBJ_ID_PORT_MDB:
1131 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1132 SWITCHDEV_OBJ_PORT_MDB(obj));
1133 break;
1134 default:
1135 err = -EOPNOTSUPP;
1136 break;
1137 }
1138
1139 return err;
1140 }
1141
1142 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1143 u16 lag_id)
1144 {
1145 struct mlxsw_sp_port *mlxsw_sp_port;
1146 int i;
1147
1148 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
1149 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1150 if (mlxsw_sp_port)
1151 return mlxsw_sp_port;
1152 }
1153 return NULL;
1154 }
1155
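/* Dump the hardware FDB through the SFD register and feed the unicast
 * records that belong to this port (or its LAG) to the switchdev callback.
 * The query loop always runs until the last record so that the dump session
 * in the firmware is finished, even if a callback returned an error.
 */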
1156 static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1157 struct switchdev_obj_port_fdb *fdb,
1158 switchdev_obj_dump_cb_t *cb,
1159 struct net_device *orig_dev)
1160 {
1161 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1162 struct mlxsw_sp_port *tmp;
1163 struct mlxsw_sp_fid *f;
1164 u16 vport_fid;
1165 char *sfd_pl;
1166 char mac[ETH_ALEN];
1167 u16 fid;
1168 u8 local_port;
1169 u16 lag_id;
1170 u8 num_rec;
1171 int stored_err = 0;
1172 int i;
1173 int err;
1174
1175 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1176 if (!sfd_pl)
1177 return -ENOMEM;
1178
1179 f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
1180 vport_fid = f ? f->fid : 0;
1181
1182 mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
1183 do {
1184 mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
1185 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1186 if (err)
1187 goto out;
1188
1189 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1190
1191 /* Even in case of error, we have to run the dump to the end
1192 * so the session in firmware is finished.
1193 */
1194 if (stored_err)
1195 continue;
1196
1197 for (i = 0; i < num_rec; i++) {
1198 switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
1199 case MLXSW_REG_SFD_REC_TYPE_UNICAST:
1200 mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
1201 &local_port);
1202 if (local_port == mlxsw_sp_port->local_port) {
1203 if (vport_fid && vport_fid == fid)
1204 fdb->vid = 0;
1205 else if (!vport_fid &&
1206 !mlxsw_sp_fid_is_vfid(fid))
1207 fdb->vid = fid;
1208 else
1209 continue;
1210 ether_addr_copy(fdb->addr, mac);
1211 fdb->ndm_state = NUD_REACHABLE;
1212 err = cb(&fdb->obj);
1213 if (err)
1214 stored_err = err;
1215 }
1216 break;
1217 case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
1218 mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
1219 mac, &fid, &lag_id);
1220 tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
1221 if (tmp && tmp->local_port ==
1222 mlxsw_sp_port->local_port) {
1223 /* LAG records can only point to LAG
1224 * devices or VLAN devices on top.
1225 */
1226 if (!netif_is_lag_master(orig_dev) &&
1227 !is_vlan_dev(orig_dev))
1228 continue;
1229 if (vport_fid && vport_fid == fid)
1230 fdb->vid = 0;
1231 else if (!vport_fid &&
1232 !mlxsw_sp_fid_is_vfid(fid))
1233 fdb->vid = fid;
1234 else
1235 continue;
1236 ether_addr_copy(fdb->addr, mac);
1237 fdb->ndm_state = NUD_REACHABLE;
1238 err = cb(&fdb->obj);
1239 if (err)
1240 stored_err = err;
1241 }
1242 break;
1243 }
1244 }
1245 } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
1246
1247 out:
1248 kfree(sfd_pl);
1249 return stored_err ? stored_err : err;
1250 }
1251
1252 static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
1253 struct switchdev_obj_port_vlan *vlan,
1254 switchdev_obj_dump_cb_t *cb)
1255 {
1256 u16 vid;
1257 int err = 0;
1258
1259 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1260 vlan->flags = 0;
1261 vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1262 vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
1263 return cb(&vlan->obj);
1264 }
1265
1266 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
1267 vlan->flags = 0;
1268 if (vid == mlxsw_sp_port->pvid)
1269 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
1270 if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
1271 vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1272 vlan->vid_begin = vid;
1273 vlan->vid_end = vid;
1274 err = cb(&vlan->obj);
1275 if (err)
1276 break;
1277 }
1278 return err;
1279 }
1280
1281 static int mlxsw_sp_port_obj_dump(struct net_device *dev,
1282 struct switchdev_obj *obj,
1283 switchdev_obj_dump_cb_t *cb)
1284 {
1285 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1286 int err = 0;
1287
1288 mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
1289 if (!mlxsw_sp_port)
1290 return -EINVAL;
1291
1292 switch (obj->id) {
1293 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1294 err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
1295 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
1296 break;
1297 case SWITCHDEV_OBJ_ID_PORT_FDB:
1298 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
1299 SWITCHDEV_OBJ_PORT_FDB(obj), cb,
1300 obj->orig_dev);
1301 break;
1302 default:
1303 err = -EOPNOTSUPP;
1304 break;
1305 }
1306
1307 return err;
1308 }
1309
1310 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1311 .switchdev_port_attr_get = mlxsw_sp_port_attr_get,
1312 .switchdev_port_attr_set = mlxsw_sp_port_attr_set,
1313 .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
1314 .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
1315 .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
1316 };
1317
1318 static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
1319 char *mac, u16 vid,
1320 struct net_device *dev)
1321 {
1322 struct switchdev_notifier_fdb_info info;
1323 unsigned long notifier_type;
1324
1325 if (learning_sync) {
1326 info.addr = mac;
1327 info.vid = vid;
1328 notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
1329 call_switchdev_notifiers(notifier_type, dev, &info.info);
1330 }
1331 }
1332
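/* Handle a single learned/aged-out MAC notification: re-program the entry in
 * the hardware FDB (removing it when the port is unknown or learning is
 * disabled) and, if learning_sync is enabled, propagate it to the bridge via
 * a switchdev notifier.
 */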
1333 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
1334 char *sfn_pl, int rec_index,
1335 bool adding)
1336 {
1337 struct mlxsw_sp_port *mlxsw_sp_port;
1338 char mac[ETH_ALEN];
1339 u8 local_port;
1340 u16 vid, fid;
1341 bool do_notification = true;
1342 int err;
1343
1344 mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
1345 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1346 if (!mlxsw_sp_port) {
1347 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
1348 goto just_remove;
1349 }
1350
1351 if (mlxsw_sp_fid_is_vfid(fid)) {
1352 struct mlxsw_sp_port *mlxsw_sp_vport;
1353
1354 mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
1355 fid);
1356 if (!mlxsw_sp_vport) {
1357 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
1358 goto just_remove;
1359 }
1360 vid = 0;
1361 /* Override the physical port with the vPort. */
1362 mlxsw_sp_port = mlxsw_sp_vport;
1363 } else {
1364 vid = fid;
1365 }
1366
1367 adding = adding && mlxsw_sp_port->learning;
1368
1369 do_fdb_op:
1370 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
1371 adding, true);
1372 if (err) {
1373 if (net_ratelimit())
1374 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1375 return;
1376 }
1377
1378 if (!do_notification)
1379 return;
1380 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
1381 adding, mac, vid, mlxsw_sp_port->dev);
1382 return;
1383
1384 just_remove:
1385 adding = false;
1386 do_notification = false;
1387 goto do_fdb_op;
1388 }
1389
1390 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
1391 char *sfn_pl, int rec_index,
1392 bool adding)
1393 {
1394 struct mlxsw_sp_port *mlxsw_sp_port;
1395 struct net_device *dev;
1396 char mac[ETH_ALEN];
1397 u16 lag_vid = 0;
1398 u16 lag_id;
1399 u16 vid, fid;
1400 bool do_notification = true;
1401 int err;
1402
1403 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
1404 mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
1405 if (!mlxsw_sp_port) {
1406 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
1407 goto just_remove;
1408 }
1409
1410 if (mlxsw_sp_fid_is_vfid(fid)) {
1411 struct mlxsw_sp_port *mlxsw_sp_vport;
1412
1413 mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
1414 fid);
1415 if (!mlxsw_sp_vport) {
1416 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
1417 goto just_remove;
1418 }
1419
1420 lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1421 dev = mlxsw_sp_vport->dev;
1422 vid = 0;
1423 /* Override the physical port with the vPort. */
1424 mlxsw_sp_port = mlxsw_sp_vport;
1425 } else {
1426 dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
1427 vid = fid;
1428 }
1429
1430 adding = adding && mlxsw_sp_port->learning;
1431
1432 do_fdb_op:
1433 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
1434 adding, true);
1435 if (err) {
1436 if (net_ratelimit())
1437 netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
1438 return;
1439 }
1440
1441 if (!do_notification)
1442 return;
1443 mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
1444 vid, dev);
1445 return;
1446
1447 just_remove:
1448 adding = false;
1449 do_notification = false;
1450 goto do_fdb_op;
1451 }
1452
1453 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
1454 char *sfn_pl, int rec_index)
1455 {
1456 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
1457 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
1458 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
1459 rec_index, true);
1460 break;
1461 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
1462 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
1463 rec_index, false);
1464 break;
1465 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
1466 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
1467 rec_index, true);
1468 break;
1469 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
1470 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
1471 rec_index, false);
1472 break;
1473 }
1474 }
1475
1476 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
1477 {
1478 mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
1479 msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
1480 }
1481
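/* Delayed work that polls the device for FDB notification records (SFN
 * register), processes them under RTNL and then re-arms itself according to
 * the configured learning interval.
 */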
1482 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
1483 {
1484 struct mlxsw_sp *mlxsw_sp;
1485 char *sfn_pl;
1486 u8 num_rec;
1487 int i;
1488 int err;
1489
1490 sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
1491 if (!sfn_pl)
1492 return;
1493
1494 mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
1495
1496 rtnl_lock();
1497 do {
1498 mlxsw_reg_sfn_pack(sfn_pl);
1499 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
1500 if (err) {
1501 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
1502 break;
1503 }
1504 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
1505 for (i = 0; i < num_rec; i++)
1506 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
1507
1508 } while (num_rec);
1509 rtnl_unlock();
1510
1511 kfree(sfn_pl);
1512 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
1513 }
1514
1515 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
1516 {
1517 int err;
1518
1519 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
1520 if (err) {
1521 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
1522 return err;
1523 }
1524 INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
1525 mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
1526 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
1527 return 0;
1528 }
1529
1530 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
1531 {
1532 cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
1533 }
1534
1535 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
1536 {
1537 return mlxsw_sp_fdb_init(mlxsw_sp);
1538 }
1539
1540 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
1541 {
1542 mlxsw_sp_fdb_fini(mlxsw_sp);
1543 }
1544
1545 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
1546 {
1547 mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
1548 }
1549
1550 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
1551 {
1552 }