]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/net/ethernet/mellanox/mlxsw/switchx2.c
Merge branch 'for-rc' of git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux
[mirror_ubuntu-artful-kernel.git] / drivers / net / ethernet / mellanox / mlxsw / switchx2.c
1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015-2016 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/slab.h>
44 #include <linux/device.h>
45 #include <linux/skbuff.h>
46 #include <linux/if_vlan.h>
47 #include <net/switchdev.h>
48
49 #include "pci.h"
50 #include "core.h"
51 #include "reg.h"
52 #include "port.h"
53 #include "trap.h"
54 #include "txheader.h"
55 #include "ib.h"
56
static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
static const char mlxsw_sx_driver_version[] = "1.0";

struct mlxsw_sx_port;

/* Driver private state for one SwitchX-2 ASIC instance. */
struct mlxsw_sx {
	struct mlxsw_sx_port **ports;	/* indexed by local port number */
	struct mlxsw_core *core;	/* mlxsw core handle used for register access */
	const struct mlxsw_bus_info *bus_info;
	u8 hw_id[ETH_ALEN];		/* base MAC read from SPAD; reported as switch parent ID */
};

/* Per-CPU software packet/byte counters for a port.
 * The u64 counters are updated under @syncp; tx_dropped is a plain u32
 * bumped with this_cpu_inc() outside the seqcount.
 */
struct mlxsw_sx_port_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

/* Netdev private area (netdev_priv()) for one switch port. */
struct mlxsw_sx_port {
	struct net_device *dev;
	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
	struct mlxsw_sx *mlxsw_sx;	/* back-pointer to the parent ASIC instance */
	u8 local_port;
	struct {
		u8 module;	/* front-panel module; used for phys port name */
	} mapping;
};
87
/* Tx header field accessors. The 16-byte Tx header is prepended to every
 * transmitted packet and consumed by the device; it is filled in by
 * mlxsw_sx_txhdr_construct() below.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 * The MSB is specified in the 'ctclass3' field.
 * Range is 0-15, where 15 is the highest priority.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);

/* tx_hdr_swid
 * Switch partition ID.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_ctclass3
 * See field 'etclass'.
 */
MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);

/* tx_hdr_rdq
 * RDQ for control packets sent to remote CPU.
 * Must be set to 0x1F for EMADs, otherwise 0.
 */
MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);

/* tx_hdr_cpu_sig
 * Signature control for packets going to CPU. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);

/* tx_hdr_sig
 * Stacking protocol signature. Must be set to 0xE0E0.
 */
MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);

/* tx_hdr_stclass
 * Stacking TClass.
 */
MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);

/* tx_hdr_emad
 * EMAD bit. Must be set for EMADs.
 */
MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
164
/* mlxsw_sx_txhdr_construct - push the Tx header onto @skb and fill it.
 *
 * Requires MLXSW_TXHDR_LEN bytes of headroom (callers either set
 * needed_headroom or reallocate headroom before calling). EMAD packets
 * get a dedicated egress tclass, RDQ value and the EMAD bit set.
 */
static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	bool is_emad = tx_info->is_emad;

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	/* We currently set default values for the egress tclass (QoS). */
	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
						  MLXSW_TXHDR_ETCLASS_5);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
					      MLXSW_TXHDR_RDQ_OTHER);
	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
					       MLXSW_TXHDR_NOT_EMAD);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
191
192 static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
193 bool is_up)
194 {
195 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
196 char paos_pl[MLXSW_REG_PAOS_LEN];
197
198 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
199 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
200 MLXSW_PORT_ADMIN_STATUS_DOWN);
201 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
202 }
203
204 static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
205 bool *p_is_up)
206 {
207 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
208 char paos_pl[MLXSW_REG_PAOS_LEN];
209 u8 oper_status;
210 int err;
211
212 mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
213 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
214 if (err)
215 return err;
216 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
217 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
218 return 0;
219 }
220
221 static int __mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port,
222 u16 mtu)
223 {
224 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
225 char pmtu_pl[MLXSW_REG_PMTU_LEN];
226 int max_mtu;
227 int err;
228
229 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
230 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
231 if (err)
232 return err;
233 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
234
235 if (mtu > max_mtu)
236 return -EINVAL;
237
238 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
239 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
240 }
241
242 static int mlxsw_sx_port_mtu_eth_set(struct mlxsw_sx_port *mlxsw_sx_port,
243 u16 mtu)
244 {
245 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
246 return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
247 }
248
249 static int mlxsw_sx_port_mtu_ib_set(struct mlxsw_sx_port *mlxsw_sx_port,
250 u16 mtu)
251 {
252 return __mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
253 }
254
255 static int mlxsw_sx_port_ib_port_set(struct mlxsw_sx_port *mlxsw_sx_port,
256 u8 ib_port)
257 {
258 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
259 char plib_pl[MLXSW_REG_PLIB_LEN] = {0};
260 int err;
261
262 mlxsw_reg_plib_local_port_set(plib_pl, mlxsw_sx_port->local_port);
263 mlxsw_reg_plib_ib_port_set(plib_pl, ib_port);
264 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(plib), plib_pl);
265 return err;
266 }
267
268 static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
269 {
270 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
271 char pspa_pl[MLXSW_REG_PSPA_LEN];
272
273 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
274 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
275 }
276
277 static int
278 mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
279 {
280 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
281 char sspr_pl[MLXSW_REG_SSPR_LEN];
282
283 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
284 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
285 }
286
287 static int mlxsw_sx_port_module_info_get(struct mlxsw_sx *mlxsw_sx,
288 u8 local_port, u8 *p_module,
289 u8 *p_width)
290 {
291 char pmlp_pl[MLXSW_REG_PMLP_LEN];
292 int err;
293
294 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
295 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
296 if (err)
297 return err;
298 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
299 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
300 return 0;
301 }
302
303 static int mlxsw_sx_port_open(struct net_device *dev)
304 {
305 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
306 int err;
307
308 err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
309 if (err)
310 return err;
311 netif_start_queue(dev);
312 return 0;
313 }
314
315 static int mlxsw_sx_port_stop(struct net_device *dev)
316 {
317 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
318
319 netif_stop_queue(dev);
320 return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
321 }
322
/* ndo_start_xmit - transmit one skb through the switch CPU port.
 *
 * Ensures MLXSW_TXHDR_LEN headroom (reallocating if needed), prepends the
 * Tx header and hands the packet to the mlxsw core. Per-CPU counters are
 * updated on success; on any failure the packet is dropped and counted in
 * tx_dropped (the stack is never asked to retry except on a busy queue).
 */
static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sx_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Not enough headroom for the Tx header - get a copy with more. */
	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
		dev_consume_skb_any(skb_orig);
	}
	mlxsw_sx_txhdr_construct(skb, &tx_info);
	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
372
373 static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
374 {
375 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
376 int err;
377
378 err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, mtu);
379 if (err)
380 return err;
381 dev->mtu = mtu;
382 return 0;
383 }
384
/* ndo_get_stats64 - aggregate the per-CPU software counters into @stats.
 *
 * The 64-bit packet/byte counters are read under the u64_stats seqcount
 * retry loop so a concurrent update on the counter's CPU is never observed
 * half-written.
 */
static struct rtnl_link_stats64 *
mlxsw_sx_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
416
417 static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
418 size_t len)
419 {
420 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
421 int err;
422
423 err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1);
424 if (err >= len)
425 return -EINVAL;
426
427 return 0;
428 }
429
/* Netdev callbacks for an Ethernet-mode SwitchX-2 port. */
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
	.ndo_open		= mlxsw_sx_port_open,
	.ndo_stop		= mlxsw_sx_port_stop,
	.ndo_start_xmit		= mlxsw_sx_port_xmit,
	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
	.ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
};
438
/* ethtool get_drvinfo - report driver name/version, firmware revision
 * (from the bus info) and the bus device name.
 */
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;

	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sx->bus_info->fw_rev.major,
		 mlxsw_sx->bus_info->fw_rev.minor,
		 mlxsw_sx->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
456
/* One hardware counter: its ethtool string name plus a getter that
 * extracts the value from a PPCNT register payload.
 */
struct mlxsw_sx_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(const char *payload);
};

/* IEEE 802.3 counter group (PPCNT), in ethtool stats order. */
static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
542
543 static void mlxsw_sx_port_get_strings(struct net_device *dev,
544 u32 stringset, u8 *data)
545 {
546 u8 *p = data;
547 int i;
548
549 switch (stringset) {
550 case ETH_SS_STATS:
551 for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
552 memcpy(p, mlxsw_sx_port_hw_stats[i].str,
553 ETH_GSTRING_LEN);
554 p += ETH_GSTRING_LEN;
555 }
556 break;
557 }
558 }
559
/* ethtool get_ethtool_stats - read the IEEE 802.3 counter group via a
 * single PPCNT query. If the query fails, every counter is reported as 0
 * instead of exposing a stale/uninitialized payload.
 */
static void mlxsw_sx_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
			     MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
}
575
576 static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
577 {
578 switch (sset) {
579 case ETH_SS_STATS:
580 return MLXSW_SX_PORT_HW_STATS_LEN;
581 default:
582 return -EOPNOTSUPP;
583 }
584 }
585
/* Mapping between PTYS protocol bits, the ethtool supported/advertised
 * link-mode bits and the speed in Mb/s. Some entries group several PTYS
 * bits that share one speed; entries with no ethtool equivalent leave
 * supported/advertised at zero.
 */
struct mlxsw_sx_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
#define MLXSW_SX_PORT_BASE_SPEED 10000 /* Mb/s */
692
693 static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
694 {
695 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
696 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
697 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
698 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
699 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
700 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
701 return SUPPORTED_FIBRE;
702
703 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
704 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
705 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
706 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
707 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
708 return SUPPORTED_Backplane;
709 return 0;
710 }
711
712 static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
713 {
714 u32 modes = 0;
715 int i;
716
717 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
718 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
719 modes |= mlxsw_sx_port_link_mode[i].supported;
720 }
721 return modes;
722 }
723
724 static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
725 {
726 u32 modes = 0;
727 int i;
728
729 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
730 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
731 modes |= mlxsw_sx_port_link_mode[i].advertised;
732 }
733 return modes;
734 }
735
736 static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
737 struct ethtool_cmd *cmd)
738 {
739 u32 speed = SPEED_UNKNOWN;
740 u8 duplex = DUPLEX_UNKNOWN;
741 int i;
742
743 if (!carrier_ok)
744 goto out;
745
746 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
747 if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
748 speed = mlxsw_sx_port_link_mode[i].speed;
749 duplex = DUPLEX_FULL;
750 break;
751 }
752 }
753 out:
754 ethtool_cmd_speed_set(cmd, speed);
755 cmd->duplex = duplex;
756 }
757
758 static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
759 {
760 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
761 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
762 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
763 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
764 return PORT_FIBRE;
765
766 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
767 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
768 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
769 return PORT_DA;
770
771 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
772 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
773 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
774 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
775 return PORT_NONE;
776
777 return PORT_OTHER;
778 }
779
/* ethtool get_settings - query PTYS and translate capability, admin and
 * operational protocol masks into the legacy ethtool_cmd fields.
 */
static int mlxsw_sx_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
				  &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	/* With no operational protocol (link down), fall back to the
	 * capability mask for connector/lp_advertising reporting.
	 */
	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}
814
815 static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
816 {
817 u32 ptys_proto = 0;
818 int i;
819
820 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
821 if (advertising & mlxsw_sx_port_link_mode[i].advertised)
822 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
823 }
824 return ptys_proto;
825 }
826
827 static u32 mlxsw_sx_to_ptys_speed(u32 speed)
828 {
829 u32 ptys_proto = 0;
830 int i;
831
832 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
833 if (speed == mlxsw_sx_port_link_mode[i].speed)
834 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
835 }
836 return ptys_proto;
837 }
838
839 static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
840 {
841 u32 ptys_proto = 0;
842 int i;
843
844 for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
845 if (mlxsw_sx_port_link_mode[i].speed <= upper_speed)
846 ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
847 }
848 return ptys_proto;
849 }
850
/* ethtool set_settings - translate the requested advertising mask (autoneg)
 * or forced speed into a PTYS admin protocol mask, intersect it with the
 * port capabilities, and write it. If the port is operationally up it is
 * then toggled down/up so the new protocol takes effect.
 */
static int mlxsw_sx_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sx_to_ptys_speed(speed);

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin,
				  NULL);

	/* Only protocols the port actually supports may be requested. */
	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
				eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	/* Bounce the port so the new admin protocol is applied. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}
917
/* ethtool callbacks for a SwitchX-2 port (legacy get/set_settings API). */
static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sx_port_get_strings,
	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
	.get_sset_count		= mlxsw_sx_port_get_sset_count,
	.get_settings		= mlxsw_sx_port_get_settings,
	.set_settings		= mlxsw_sx_port_set_settings,
};
927
928 static int mlxsw_sx_port_attr_get(struct net_device *dev,
929 struct switchdev_attr *attr)
930 {
931 struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
932 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
933
934 switch (attr->id) {
935 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
936 attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
937 memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
938 break;
939 default:
940 return -EOPNOTSUPP;
941 }
942
943 return 0;
944 }
945
/* switchdev callbacks; only attribute get (parent ID) is implemented. */
static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
};
949
950 static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
951 {
952 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
953 int err;
954
955 err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
956 if (err)
957 return err;
958 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
959 return 0;
960 }
961
/* Derive the port MAC address: read the switch base MAC via the PPAD
 * register and add the local port number to its last byte.
 */
static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
	struct net_device *dev = mlxsw_sx_port->dev;
	char ppad_pl[MLXSW_REG_PPAD_LEN];
	int err;

	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
	if (err)
		return err;
	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
	/* The last byte value in base mac address is guaranteed
	 * to be such it does not overflow when adding local_port
	 * value.
	 */
	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
	return 0;
}
981
982 static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
983 u16 vid, enum mlxsw_reg_spms_state state)
984 {
985 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
986 char *spms_pl;
987 int err;
988
989 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
990 if (!spms_pl)
991 return -ENOMEM;
992 mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
993 mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
994 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
995 kfree(spms_pl);
996 return err;
997 }
998
999 static int mlxsw_sx_port_ib_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
1000 u16 speed, u16 width)
1001 {
1002 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1003 char ptys_pl[MLXSW_REG_PTYS_LEN];
1004
1005 mlxsw_reg_ptys_ib_pack(ptys_pl, mlxsw_sx_port->local_port, speed,
1006 width);
1007 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
1008 }
1009
1010 static int
1011 mlxsw_sx_port_speed_by_width_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 width)
1012 {
1013 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1014 u32 upper_speed = MLXSW_SX_PORT_BASE_SPEED * width;
1015 char ptys_pl[MLXSW_REG_PTYS_LEN];
1016 u32 eth_proto_admin;
1017
1018 eth_proto_admin = mlxsw_sx_to_ptys_upper_speed(upper_speed);
1019 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port,
1020 eth_proto_admin);
1021 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
1022 }
1023
1024 static int
1025 mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
1026 enum mlxsw_reg_spmlr_learn_mode mode)
1027 {
1028 struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
1029 char spmlr_pl[MLXSW_REG_SPMLR_LEN];
1030
1031 mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
1032 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
1033 }
1034
/* Allocate, configure and register an Ethernet netdev for @local_port.
 *
 * On success the port is published to the core (devlink) and stored in
 * mlxsw_sx->ports[]. On failure everything done so far is unwound in
 * reverse order through the goto ladder and the error code is returned.
 */
static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				      u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	struct net_device *dev;
	int err;

	/* The port private struct lives in the netdev's private area. */
	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, mlxsw_sx->bus_info->dev);
	mlxsw_sx_port = netdev_priv(dev);
	mlxsw_sx_port->dev = dev;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	/* Per-CPU Rx/Tx counters, updated from the Rx listener path. */
	mlxsw_sx_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
	if (!mlxsw_sx_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;

	/* Derive the netdev MAC address from the switch base MAC. */
	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
			mlxsw_sx_port->local_port);
		goto err_dev_addr_get;
	}

	/* Carrier is driven by PUDE link events; start with it off. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_VLAN_CHALLENGED;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* SWID 0 is the Ethernet switch partition (see swid_config). */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sx_port_speed_by_width_set(mlxsw_sx_port, width);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	err = mlxsw_sx_port_mtu_eth_set(mlxsw_sx_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	/* Keep the port administratively down until ndo_open. */
	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
					  MLXSW_PORT_DEFAULT_VID,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
			mlxsw_sx_port->local_port);
		goto err_port_stp_state_set;
	}

	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
			mlxsw_sx_port->local_port);
		goto err_port_mac_learning_mode_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sx_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
				mlxsw_sx_port, dev, false, 0);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_get:
	free_percpu(mlxsw_sx_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
1159
1160 static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
1161 u8 module, u8 width)
1162 {
1163 int err;
1164
1165 err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
1166 if (err) {
1167 dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
1168 local_port);
1169 return err;
1170 }
1171 err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module, width);
1172 if (err)
1173 goto err_port_create;
1174
1175 return 0;
1176
1177 err_port_create:
1178 mlxsw_core_port_fini(mlxsw_sx->core, local_port);
1179 return err;
1180 }
1181
/* Tear down an Ethernet port: unpublish it from the core, unregister the
 * netdev, disable the port's SWID and release its memory. Reverse order
 * of __mlxsw_sx_port_eth_create().
 */
static void __mlxsw_sx_port_eth_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	free_percpu(mlxsw_sx_port->pcpu_stats);
	free_netdev(mlxsw_sx_port->dev);
}
1193
1194 static bool mlxsw_sx_port_created(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1195 {
1196 return mlxsw_sx->ports[local_port] != NULL;
1197 }
1198
/* Create an Infiniband port for @local_port. Unlike the Ethernet flavor,
 * no netdev is allocated; the port struct stands alone and the port is
 * brought administratively up immediately, since link management is left
 * to the IB subnet manager.
 *
 * On failure the goto ladder unwinds in reverse order and the error code
 * is returned.
 */
static int __mlxsw_sx_port_ib_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
				     u8 module, u8 width)
{
	struct mlxsw_sx_port *mlxsw_sx_port;
	int err;

	mlxsw_sx_port = kzalloc(sizeof(*mlxsw_sx_port), GFP_KERNEL);
	if (!mlxsw_sx_port)
		return -ENOMEM;
	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
	mlxsw_sx_port->local_port = local_port;
	mlxsw_sx_port->mapping.module = module;

	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sx_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	/* Adding port to Infiniband swid (1) */
	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sx_port->local_port);
		goto err_port_swid_set;
	}

	/* Expose the IB port number as it's front panel name */
	err = mlxsw_sx_port_ib_port_set(mlxsw_sx_port, module + 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set IB port\n",
			mlxsw_sx_port->local_port);
		goto err_port_ib_set;
	}

	/* Supports all speeds from SDR to FDR (bitmask) and support bus width
	 * of 1x, 2x and 4x (3 bits bitmask)
	 */
	err = mlxsw_sx_port_ib_speed_set(mlxsw_sx_port,
					 MLXSW_REG_PTYS_IB_SPEED_EDR - 1,
					 BIT(3) - 1);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
			mlxsw_sx_port->local_port);
		goto err_port_speed_set;
	}

	/* Change to the maximum MTU the device supports, the SMA will take
	 * care of the active MTU
	 */
	err = mlxsw_sx_port_mtu_ib_set(mlxsw_sx_port, MLXSW_IB_DEFAULT_MTU);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sx_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to change admin state to UP\n",
			mlxsw_sx_port->local_port);
		goto err_port_admin_set;
	}

	mlxsw_core_port_ib_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
			       mlxsw_sx_port);
	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
	return 0;

err_port_admin_set:
err_port_mtu_set:
err_port_speed_set:
err_port_ib_set:
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
err_port_system_port_mapping_set:
	kfree(mlxsw_sx_port);
	return err;
}
1279
/* Tear down an Infiniband port: unpublish it from the core, bring it
 * administratively down, disable its SWID and free the port struct.
 */
static void __mlxsw_sx_port_ib_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];

	mlxsw_core_port_clear(mlxsw_sx->core, local_port, mlxsw_sx);
	mlxsw_sx->ports[local_port] = NULL;
	mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
	kfree(mlxsw_sx_port);
}
1290
1291 static void __mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
1292 {
1293 enum devlink_port_type port_type =
1294 mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1295
1296 if (port_type == DEVLINK_PORT_TYPE_ETH)
1297 __mlxsw_sx_port_eth_remove(mlxsw_sx, local_port);
1298 else if (port_type == DEVLINK_PORT_TYPE_IB)
1299 __mlxsw_sx_port_ib_remove(mlxsw_sx, local_port);
1300 }
1301
/* Remove the port of either flavor and then finalize its core port. */
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
	__mlxsw_sx_port_remove(mlxsw_sx, local_port);
	mlxsw_core_port_fini(mlxsw_sx->core, local_port);
}
1307
1308 static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
1309 {
1310 int i;
1311
1312 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1313 if (mlxsw_sx_port_created(mlxsw_sx, i))
1314 mlxsw_sx_port_remove(mlxsw_sx, i);
1315 kfree(mlxsw_sx->ports);
1316 }
1317
1318 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
1319 {
1320 size_t alloc_size;
1321 u8 module, width;
1322 int i;
1323 int err;
1324
1325 alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
1326 mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
1327 if (!mlxsw_sx->ports)
1328 return -ENOMEM;
1329
1330 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1331 err = mlxsw_sx_port_module_info_get(mlxsw_sx, i, &module,
1332 &width);
1333 if (err)
1334 goto err_port_module_info_get;
1335 if (!width)
1336 continue;
1337 err = mlxsw_sx_port_eth_create(mlxsw_sx, i, module, width);
1338 if (err)
1339 goto err_port_create;
1340 }
1341 return 0;
1342
1343 err_port_create:
1344 err_port_module_info_get:
1345 for (i--; i >= 1; i--)
1346 if (mlxsw_sx_port_created(mlxsw_sx, i))
1347 mlxsw_sx_port_remove(mlxsw_sx, i);
1348 kfree(mlxsw_sx->ports);
1349 return err;
1350 }
1351
1352 static void mlxsw_sx_pude_eth_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1353 enum mlxsw_reg_pude_oper_status status)
1354 {
1355 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1356 netdev_info(mlxsw_sx_port->dev, "link up\n");
1357 netif_carrier_on(mlxsw_sx_port->dev);
1358 } else {
1359 netdev_info(mlxsw_sx_port->dev, "link down\n");
1360 netif_carrier_off(mlxsw_sx_port->dev);
1361 }
1362 }
1363
1364 static void mlxsw_sx_pude_ib_event_func(struct mlxsw_sx_port *mlxsw_sx_port,
1365 enum mlxsw_reg_pude_oper_status status)
1366 {
1367 if (status == MLXSW_PORT_OPER_STATUS_UP)
1368 pr_info("ib link for port %d - up\n",
1369 mlxsw_sx_port->mapping.module + 1);
1370 else
1371 pr_info("ib link for port %d - down\n",
1372 mlxsw_sx_port->mapping.module + 1);
1373 }
1374
1375 static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
1376 char *pude_pl, void *priv)
1377 {
1378 struct mlxsw_sx *mlxsw_sx = priv;
1379 struct mlxsw_sx_port *mlxsw_sx_port;
1380 enum mlxsw_reg_pude_oper_status status;
1381 enum devlink_port_type port_type;
1382 u8 local_port;
1383
1384 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1385 mlxsw_sx_port = mlxsw_sx->ports[local_port];
1386 if (!mlxsw_sx_port) {
1387 dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1388 local_port);
1389 return;
1390 }
1391
1392 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1393 port_type = mlxsw_core_port_type_get(mlxsw_sx->core, local_port);
1394 if (port_type == DEVLINK_PORT_TYPE_ETH)
1395 mlxsw_sx_pude_eth_event_func(mlxsw_sx_port, status);
1396 else if (port_type == DEVLINK_PORT_TYPE_IB)
1397 mlxsw_sx_pude_ib_event_func(mlxsw_sx_port, status);
1398 }
1399
/* Rx trap listener: hand a trapped packet to the networking stack on the
 * netdev of the port it arrived on, updating the per-CPU Rx counters.
 */
static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sx *mlxsw_sx = priv;
	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;

	/* NOTE(review): the skb appears to be leaked on this early return —
	 * presumably the caller owns it then; confirm against the core's
	 * rx-listener contract.
	 */
	if (unlikely(!mlxsw_sx_port)) {
		dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sx_port->dev;

	/* u64_stats syncp guards the 64-bit counters on 32-bit hosts. */
	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
1424
1425 static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
1426 enum devlink_port_type new_type)
1427 {
1428 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
1429 u8 module, width;
1430 int err;
1431
1432 if (new_type == DEVLINK_PORT_TYPE_AUTO)
1433 return -EOPNOTSUPP;
1434
1435 __mlxsw_sx_port_remove(mlxsw_sx, local_port);
1436 err = mlxsw_sx_port_module_info_get(mlxsw_sx, local_port, &module,
1437 &width);
1438 if (err)
1439 goto err_port_module_info_get;
1440
1441 if (new_type == DEVLINK_PORT_TYPE_ETH)
1442 err = __mlxsw_sx_port_eth_create(mlxsw_sx, local_port, module,
1443 width);
1444 else if (new_type == DEVLINK_PORT_TYPE_IB)
1445 err = __mlxsw_sx_port_ib_create(mlxsw_sx, local_port, module,
1446 width);
1447
1448 err_port_module_info_get:
1449 return err;
1450 }
1451
/* Shorthand for a non-matching-traffic Rx listener on the SX2_RX trap
 * group: trap to CPU, forward action for the listener being disabled.
 */
#define MLXSW_SX_RXL(_trap_id) \
	MLXSW_RXL(mlxsw_sx_rx_listener_func, _trap_id, TRAP_TO_CPU, \
		  false, SX2_RX, FORWARD)

/* Traps registered by this driver: the PUDE link event plus the control
 * packet types delivered to the CPU via mlxsw_sx_rx_listener_func().
 */
static const struct mlxsw_listener mlxsw_sx_listener[] = {
	MLXSW_EVENTL(mlxsw_sx_pude_event_func, PUDE, EMAD),
	MLXSW_SX_RXL(FDB_MC),
	MLXSW_SX_RXL(STP),
	MLXSW_SX_RXL(LACP),
	MLXSW_SX_RXL(EAPOL),
	MLXSW_SX_RXL(LLDP),
	MLXSW_SX_RXL(MMRP),
	MLXSW_SX_RXL(MVRP),
	MLXSW_SX_RXL(RPVST),
	MLXSW_SX_RXL(DHCP),
	MLXSW_SX_RXL(IGMP_QUERY),
	MLXSW_SX_RXL(IGMP_V1_REPORT),
	MLXSW_SX_RXL(IGMP_V2_REPORT),
	MLXSW_SX_RXL(IGMP_V2_LEAVE),
	MLXSW_SX_RXL(IGMP_V3_REPORT),
};
1473
1474 static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
1475 {
1476 char htgt_pl[MLXSW_REG_HTGT_LEN];
1477 int i;
1478 int err;
1479
1480 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_RX,
1481 MLXSW_REG_HTGT_INVALID_POLICER,
1482 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1483 MLXSW_REG_HTGT_DEFAULT_TC);
1484 mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1485 MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_RX);
1486
1487 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1488 if (err)
1489 return err;
1490
1491 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_SX2_CTRL,
1492 MLXSW_REG_HTGT_INVALID_POLICER,
1493 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
1494 MLXSW_REG_HTGT_DEFAULT_TC);
1495 mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
1496 MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_CTRL);
1497
1498 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
1499 if (err)
1500 return err;
1501
1502 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1503 err = mlxsw_core_trap_register(mlxsw_sx->core,
1504 &mlxsw_sx_listener[i],
1505 mlxsw_sx);
1506 if (err)
1507 goto err_listener_register;
1508
1509 }
1510 return 0;
1511
1512 err_listener_register:
1513 for (i--; i >= 0; i--) {
1514 mlxsw_core_trap_unregister(mlxsw_sx->core,
1515 &mlxsw_sx_listener[i],
1516 mlxsw_sx);
1517 }
1518 return err;
1519 }
1520
1521 static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
1522 {
1523 int i;
1524
1525 for (i = 0; i < ARRAY_SIZE(mlxsw_sx_listener); i++) {
1526 mlxsw_core_trap_unregister(mlxsw_sx->core,
1527 &mlxsw_sx_listener[i],
1528 mlxsw_sx);
1529 }
1530 }
1531
1532 static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
1533 {
1534 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1535 char sgcr_pl[MLXSW_REG_SGCR_LEN];
1536 char *sftr_pl;
1537 int err;
1538
1539 /* Configure a flooding table, which includes only CPU port. */
1540 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
1541 if (!sftr_pl)
1542 return -ENOMEM;
1543 mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
1544 MLXSW_PORT_CPU_PORT, true);
1545 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
1546 kfree(sftr_pl);
1547 if (err)
1548 return err;
1549
1550 /* Flood different packet types using the flooding table. */
1551 mlxsw_reg_sfgc_pack(sfgc_pl,
1552 MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
1553 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1554 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1555 0);
1556 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1557 if (err)
1558 return err;
1559
1560 mlxsw_reg_sfgc_pack(sfgc_pl,
1561 MLXSW_REG_SFGC_TYPE_BROADCAST,
1562 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1563 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1564 0);
1565 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1566 if (err)
1567 return err;
1568
1569 mlxsw_reg_sfgc_pack(sfgc_pl,
1570 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
1571 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1572 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1573 0);
1574 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1575 if (err)
1576 return err;
1577
1578 mlxsw_reg_sfgc_pack(sfgc_pl,
1579 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
1580 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1581 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1582 0);
1583 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1584 if (err)
1585 return err;
1586
1587 mlxsw_reg_sfgc_pack(sfgc_pl,
1588 MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
1589 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
1590 MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
1591 0);
1592 err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
1593 if (err)
1594 return err;
1595
1596 mlxsw_reg_sgcr_pack(sgcr_pl, true);
1597 return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
1598 }
1599
/* Core callback: configure the EMAD trap group (used for register access
 * emulation traffic) across all SWIDs, steering it to the EMAD RDQ.
 */
static int mlxsw_sx_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
			    MLXSW_REG_HTGT_INVALID_POLICER,
			    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
			    MLXSW_REG_HTGT_DEFAULT_TC);
	mlxsw_reg_htgt_swid_set(htgt_pl, MLXSW_PORT_SWID_ALL_SWIDS);
	mlxsw_reg_htgt_local_path_rdq_set(htgt_pl,
					  MLXSW_REG_HTGT_LOCAL_PATH_RDQ_SX2_EMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
1613
/* Driver init callback: read the switch HW ID, create all ports, then set
 * up traps and flood tables. Failures unwind in reverse order.
 */
static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sx->core = mlxsw_core;
	mlxsw_sx->bus_info = mlxsw_bus_info;

	err = mlxsw_sx_hw_id_get(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
		return err;
	}

	err = mlxsw_sx_ports_create(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sx_traps_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps\n");
		goto err_listener_register;
	}

	err = mlxsw_sx_flood_init(mlxsw_sx);
	if (err) {
		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	return 0;

err_flood_init:
	mlxsw_sx_traps_fini(mlxsw_sx);
err_listener_register:
	mlxsw_sx_ports_remove(mlxsw_sx);
	return err;
}
1655
/* Driver fini callback: tear down in reverse order of mlxsw_sx_init(). */
static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sx_traps_fini(mlxsw_sx);
	mlxsw_sx_ports_remove(mlxsw_sx);
}
1663
/* Resource profile requested from the device at init time. Each used_*
 * flag tells the firmware whether the paired max_* value is valid.
 * NOTE(review): the specific limits (7000 MIDs, 48000 system ports, ...)
 * look device/deployment derived — confirm against the SwitchX-2 PRM
 * before changing any of them.
 */
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 48000,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.max_flood_tables		= 2,
	.max_vid_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 6,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	/* Two switch partitions: SWID 0 for Ethernet, SWID 1 for IB. */
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		},
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_IB,
		}
	},
	.resource_query_enable		= 0,
};
1698
/* mlxsw core driver registration: callbacks and the resource profile
 * the core uses to operate SwitchX-2 devices.
 */
static struct mlxsw_driver mlxsw_sx_driver = {
	.kind			= mlxsw_sx_driver_name,
	.priv_size		= sizeof(struct mlxsw_sx),
	.init			= mlxsw_sx_init,
	.fini			= mlxsw_sx_fini,
	.basic_trap_groups_set	= mlxsw_sx_basic_trap_groups_set,
	.txhdr_construct	= mlxsw_sx_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sx_config_profile,
	.port_type_set		= mlxsw_sx_port_type_set,
};
1710
/* PCI IDs this driver binds to: the Mellanox SwitchX-2 ASIC. */
static const struct pci_device_id mlxsw_sx_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
	{0, },
};
1715
/* PCI driver shell; probe/remove are filled in by the mlxsw PCI core
 * when registered through mlxsw_pci_driver_register().
 */
static struct pci_driver mlxsw_sx_pci_driver = {
	.name = mlxsw_sx_driver_name,
	.id_table = mlxsw_sx_pci_id_table,
};
1720
1721 static int __init mlxsw_sx_module_init(void)
1722 {
1723 int err;
1724
1725 err = mlxsw_core_driver_register(&mlxsw_sx_driver);
1726 if (err)
1727 return err;
1728
1729 err = mlxsw_pci_driver_register(&mlxsw_sx_pci_driver);
1730 if (err)
1731 goto err_pci_driver_register;
1732
1733 return 0;
1734
1735 err_pci_driver_register:
1736 mlxsw_core_driver_unregister(&mlxsw_sx_driver);
1737 return err;
1738 }
1739
/* Module exit: unregister in reverse order of mlxsw_sx_module_init(). */
static void __exit mlxsw_sx_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sx_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
}
1745
module_init(mlxsw_sx_module_init);
module_exit(mlxsw_sx_module_exit);

/* Module metadata; the device table enables PCI-ID based autoloading. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sx_pci_id_table);