/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

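/* Resolve the FID a given VID maps to on this port. A vPort uses the FID
 * it is currently attached to; otherwise the VID itself is used (VLAN-aware
 * bridge), falling back to the port's PVID when the VID is zero.
 */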
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = f ? f->fid : vid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

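/* switchdev operations may be invoked with orig_dev pointing at a bridge
 * device or a VLAN device stacked on top of the port. Resolve such devices
 * to the vPort that represents them in the driver.
 */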
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	if (netif_is_bridge_master(dev)) {
		fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp, dev);
		if (fid) {
			mlxsw_sp_vport =
				mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								fid->fid);
			WARN_ON(!mlxsw_sp_vport);
			return mlxsw_sp_vport;
		}
	}

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

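/* Program the port's STP state into the device via the SPMS register. The
 * state is applied per VID: a vPort only updates its own VID, while a
 * regular port walks all of its active VLANs.
 */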
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

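/* Enable or disable flooding towards this port for a range of indexes in
 * the given flood table via the SFTR register. For a vPort the index is a
 * FID, otherwise it is interpreted as a FID offset (the VID).
 */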
static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 idx_begin, u16 idx_end,
					   enum mlxsw_sp_flood_table table,
					   bool set)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool uc_set,
				     bool bc_set, bool mc_set)
{
	int err;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_UC, uc_set);
	if (err)
		return err;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_BC, bc_set);
	if (err)
		goto err_flood_bc_set;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_MC, mc_set);
	if (err)
		goto err_flood_mc_set;
	return 0;

err_flood_mc_set:
	__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
err_flood_bc_set:
	__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
	return err;
}

static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 enum mlxsw_sp_flood_table table,
					 bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
						       vfid, table, set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
						      table, set);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
						!set);
	netdev_err(dev, "Failed to configure flooding\n");
	return err;
}

static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 bool mc_disabled)
{
	bool set;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
		set = mc_disabled ?
			mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
		err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						    MLXSW_SP_FLOOD_TABLE_MC,
						    set);
	}

	if (!err)
		mlxsw_sp_port->mc_disabled = mc_disabled;

	return err;
}

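/* Set flooding for a vPort's vFID. Broadcast flooding follows the unicast
 * setting, while the multicast setting additionally honours the bridge's
 * mc_disabled state and the port's mrouter flag.
 */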
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	bool mc_set = set;
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);

	if (set)
		mc_set = mlxsw_sp_vport->mc_disabled ?
			mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;

	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
					 mc_set);
}

static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	u16 vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
							set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						       set);
		if (err)
			goto err_port_vid_learning_set;
	}

	return 0;

err_port_vid_learning_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						    MLXSW_SP_FLOOD_TABLE_UC,
						    !mlxsw_sp_port->uc_flood);
		if (err)
			return err;
	}

	if ((learning ^ brport_flags) & BR_LEARNING) {
		err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
						 !mlxsw_sp_port->learning);
		if (err)
			goto err_port_learning_set;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;

err_port_learning_set:
	if ((uc_flood ^ brport_flags) & BR_FLOOD)
		mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
					      MLXSW_SP_FLOOD_TABLE_UC,
					      mlxsw_sp_port->uc_flood);
	return err;
}

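/* The device ages out FDB entries on its own; SFDAT sets the ageing time,
 * in seconds, for the whole switch.
 */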
static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if (!vlan_enabled && mlxsw_sp->master_bridge.dev == orig_dev) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    bool is_port_mc_router)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->mc_router = is_port_mc_router;
	if (!mlxsw_sp_port->mc_disabled)
		return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						     MLXSW_SP_FLOOD_TABLE_MC,
						     is_port_mc_router);

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
						       attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

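/* FIDs are created and destroyed in the device through the SFMR register.
 * The SVFA register below maintains the global VID-to-FID mapping.
 */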
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports that are members of the FID might be
	 * using a {Port, VID} to FID mapping, we create a global
	 * VID-to-FID mapping. This allows a port to transition to VLAN
	 * mode, knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	if (test_bit(fid, mlxsw_sp_port->active_vlans))
		return 0;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

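/* Make the port a member of a range of FIDs: join (or create) each FID,
 * set up flooding for the range and then add the {Port, VID} to FID
 * mappings. Rollback is performed in reverse order on any failure.
 */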
static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	bool mc_flood;
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	mc_flood = mlxsw_sp_port->mc_disabled ?
			mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					mlxsw_sp_port->uc_flood, true,
					mc_flood);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false, false);
err_port_flood_set:
	/* All FIDs in the range were joined, fid_end included, so start
	 * the leave loop one past the end.
	 */
	fid = fid_end + 1;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false, false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

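/* Learning is toggled per VID through the SPVMLR register, which accepts
 * at most MLXSW_REG_SPVMLR_REC_MAX_COUNT records per transaction, so the
 * VID range is processed in batches of that size.
 */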
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid_begin, u16 vid_end,
					  bool learn_enable)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
						       vid_e, learn_enable);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
				     true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
					     mlxsw_sp_port->learning);
	if (err) {
		netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
			   vid_begin, vid_end);
		goto err_port_vid_learning_set;
	}

	/* Change activity bits only if the HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);
err_port_vid_learning_set:
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
			       false, false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

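/* Write a single unicast FDB record via the SFD register. Learned entries
 * are marked dynamic so the device can age them out, while static entries
 * persist until explicitly removed.
 */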
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

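/* Multicast groups are tracked in a linear list keyed by {MAC, FID}. A MID
 * index from the device's MID space is allocated for each group and
 * released again when the last port leaves the group.
 */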
static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 fid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 fid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	u16 vid, pvid;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid_begin, vid_end,
			       false, false);

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Change activity bits only after the HW operations were issued */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

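/* Notify the bridge driver about a learned or aged-out FDB entry so it can
 * keep its software FDB in sync with the device. Only done when
 * learning_sync is enabled on the port.
 */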
static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

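/* FDB notifications are polled from the device: a delayed work queries the
 * SFN register at the configured interval and re-arms itself when done.
 */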
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}