// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 NovaTech LLC
 * George McCollister <george.mccollister@gmail.com>
 */

#include <net/dsa.h>
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
#include <linux/if_hsr.h>
#include "xrs700x.h"
#include "xrs700x_reg.h"

#define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)

#define XRS7000X_SUPPORTED_HSR_FEATURES \
	(NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
	 NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)

#define XRS7003E_ID	0x100
#define XRS7003F_ID	0x101
#define XRS7004E_ID	0x200
#define XRS7004F_ID	0x201

const struct xrs700x_info xrs7003e_info = {XRS7003E_ID, "XRS7003E", 3};
EXPORT_SYMBOL(xrs7003e_info);

const struct xrs700x_info xrs7003f_info = {XRS7003F_ID, "XRS7003F", 3};
EXPORT_SYMBOL(xrs7003f_info);

const struct xrs700x_info xrs7004e_info = {XRS7004E_ID, "XRS7004E", 4};
EXPORT_SYMBOL(xrs7004e_info);

const struct xrs700x_info xrs7004f_info = {XRS7004F_ID, "XRS7004F", 4};
EXPORT_SYMBOL(xrs7004f_info);

struct xrs700x_regfield {
	struct reg_field rf;
	struct regmap_field **rmf;
};

struct xrs700x_mib {
	unsigned int offset;
	const char *name;
	int stats64_offset;
};

#define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
#define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}

static const struct xrs700x_mib xrs700x_mibs[] = {
	XRS700X_MIB(XRS_RX_GOOD_OCTETS_L, "rx_good_octets", rx_bytes),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L, "rx_bad_octets"),
	XRS700X_MIB(XRS_RX_UNICAST_L, "rx_unicast", rx_packets),
	XRS700X_MIB(XRS_RX_BROADCAST_L, "rx_broadcast", rx_packets),
	XRS700X_MIB(XRS_RX_MULTICAST_L, "rx_multicast", multicast),
	XRS700X_MIB(XRS_RX_UNDERSIZE_L, "rx_undersize", rx_length_errors),
	XRS700X_MIB(XRS_RX_FRAGMENTS_L, "rx_fragments", rx_length_errors),
	XRS700X_MIB(XRS_RX_OVERSIZE_L, "rx_oversize", rx_length_errors),
	XRS700X_MIB(XRS_RX_JABBER_L, "rx_jabber", rx_length_errors),
	XRS700X_MIB(XRS_RX_ERR_L, "rx_err", rx_errors),
	XRS700X_MIB(XRS_RX_CRC_L, "rx_crc", rx_crc_errors),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L, "rx_64"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L, "rx_65_127"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L, "rx_128_255"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L, "rx_256_511"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L, "rx_512_1023"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L, "rx_1024_1536"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L, "rx_hsr_prp"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L, "rx_wronglan"),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L, "rx_duplicate"),
	XRS700X_MIB(XRS_TX_OCTETS_L, "tx_octets", tx_bytes),
	XRS700X_MIB(XRS_TX_UNICAST_L, "tx_unicast", tx_packets),
	XRS700X_MIB(XRS_TX_BROADCAST_L, "tx_broadcast", tx_packets),
	XRS700X_MIB(XRS_TX_MULTICAST_L, "tx_multicast", tx_packets),
	XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L, "tx_hsr_prp"),
	XRS700X_MIB(XRS_PRIQ_DROP_L, "priq_drop", tx_dropped),
	XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
};

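/* Destination MAC address base of HSR/PRP supervision frames
 * (01:15:4e:00:01:XX); the last octet is left out of the 40-bit inbound
 * policy match set up in xrs700x_port_add_hsrsup_ipf().
 */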
static const u8 eth_hsrsup_addr[ETH_ALEN] = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};

static void xrs700x_get_strings(struct dsa_switch *ds, int port,
				u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
		strscpy(data, xrs700x_mibs[i].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}
}

static int xrs700x_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ARRAY_SIZE(xrs700x_mibs);
}

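/* Capture the port's hardware counters, fold each 16-bit low/high register
 * pair into the 64-bit software totals and refresh the cached stats64
 * snapshot.
 */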
static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];
	struct rtnl_link_stats64 stats;
	int i;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&p->mib_mutex);

	/* Capture counter values */
	regmap_write(priv->regmap, XRS_CNT_CTRL(port), 1);

	for (i = 0; i < ARRAY_SIZE(xrs700x_mibs); i++) {
		unsigned int high = 0, low = 0, reg;

		reg = xrs700x_mibs[i].offset + XRS_PORT_OFFSET * port;
		regmap_read(priv->regmap, reg, &low);
		regmap_read(priv->regmap, reg + 2, &high);

		p->mib_data[i] += (high << 16) | low;

		if (xrs700x_mibs[i].stats64_offset >= 0) {
			u8 *s = (u8 *)&stats + xrs700x_mibs[i].stats64_offset;
			*(u64 *)s += p->mib_data[i];
		}
	}

	/* multicast must be added to rx_packets (which already includes
	 * unicast and broadcast)
	 */
	stats.rx_packets += stats.multicast;

	u64_stats_update_begin(&p->syncp);
	p->stats64 = stats;
	u64_stats_update_end(&p->syncp);

	mutex_unlock(&p->mib_mutex);
}

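/* Periodic worker: poll the counters of every port so the hardware values
 * keep being accumulated into the 64-bit totals between ethtool queries.
 */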
static void xrs700x_mib_work(struct work_struct *work)
{
	struct xrs700x *priv = container_of(work, struct xrs700x,
					    mib_work.work);
	int i;

	for (i = 0; i < priv->ds->num_ports; i++)
		xrs700x_read_port_counters(priv, i);

	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);
}

static void xrs700x_get_ethtool_stats(struct dsa_switch *ds, int port,
				      u64 *data)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];

	xrs700x_read_port_counters(priv, port);

	mutex_lock(&p->mib_mutex);
	memcpy(data, p->mib_data, sizeof(*data) * ARRAY_SIZE(xrs700x_mibs));
	mutex_unlock(&p->mib_mutex);
}

static void xrs700x_get_stats64(struct dsa_switch *ds, int port,
				struct rtnl_link_stats64 *s)
{
	struct xrs700x *priv = ds->priv;
	struct xrs700x_port *p = &priv->ports[port];
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&p->syncp);
		*s = p->stats64;
	} while (u64_stats_fetch_retry(&p->syncp, start));
}

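/* Allocate regmap fields for the bit fields of the per-port state register
 * (forwarding state, management mode, selected speed, current speed), one
 * field instance per port, strided by XRS_PORT_OFFSET.
 */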
static int xrs700x_setup_regmap_range(struct xrs700x *priv)
{
	struct xrs700x_regfield regfields[] = {
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_forward
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_management
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_sel_speed
		},
		{
			.rf = REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
					   priv->ds->num_ports,
					   XRS_PORT_OFFSET),
			.rmf = &priv->ps_cur_speed
		}
	};
	int i = 0;

	for (; i < ARRAY_SIZE(regfields); i++) {
		*regfields[i].rmf = devm_regmap_field_alloc(priv->dev,
							    priv->regmap,
							    regfields[i].rf);
		if (IS_ERR(*regfields[i].rmf))
			return PTR_ERR(*regfields[i].rmf);
	}

	return 0;
}

static enum dsa_tag_protocol xrs700x_get_tag_protocol(struct dsa_switch *ds,
						      int port,
						      enum dsa_tag_protocol m)
{
	return DSA_TAG_PROTO_XRS700X;
}

static int xrs700x_reset(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val;
	int ret;

	ret = regmap_write(priv->regmap, XRS_GENERAL, XRS_GENERAL_RESET);
	if (ret)
		goto error;

	ret = regmap_read_poll_timeout(priv->regmap, XRS_GENERAL,
				       val, !(val & XRS_GENERAL_RESET),
				       10, 1000);
error:
	if (ret) {
		dev_err_ratelimited(priv->dev, "error resetting switch: %d\n",
				    ret);
	}

	return ret;
}

static void xrs700x_port_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct xrs700x *priv = ds->priv;
	unsigned int bpdus = 1;
	unsigned int val;

	switch (state) {
	case BR_STATE_DISABLED:
		bpdus = 0;
		fallthrough;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		val = XRS_PORT_DISABLED;
		break;
	case BR_STATE_LEARNING:
		val = XRS_PORT_LEARNING;
		break;
	case BR_STATE_FORWARDING:
		val = XRS_PORT_FORWARDING;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	regmap_fields_write(priv->ps_forward, port, val);

	/* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
	 * which allows BPDU forwarding to the CPU port when the front facing
	 * port is in disabled/learning state.
	 */
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 1, bpdus);

	dev_dbg_ratelimited(priv->dev, "%s - port: %d, state: %u, val: 0x%x\n",
			    __func__, port, state, val);
}

/* Add an inbound policy filter which matches the BPDU destination MAC
 * and forwards to the CPU port. Leave the policy disabled, it will be
 * enabled as needed.
 */
static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare all 48 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 0), 48 << 2);
	if (ret)
		return ret;

	/* match BPDU destination 01:80:c2:00:00:00 */
	for (i = 0; i < sizeof(eth_stp_addr); i += 2) {
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 0) + i,
				   eth_stp_addr[i] |
				   (eth_stp_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror BPDU to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 0), val);
	if (ret)
		return ret;

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 0), 0);
	if (ret)
		return ret;

	return 0;
}

/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
 * range and forwards to the CPU port without discarding duplicates.
 * This is required to correctly populate the HSR/PRP node_table.
 * Leave the policy disabled, it will be enabled as needed.
 */
static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
				       int fwdport)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int i = 0;
	int ret;

	/* Compare 40 bits of the destination MAC address. */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
	if (ret)
		return ret;

	/* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
	for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
		ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
				   eth_hsrsup_addr[i] |
				   (eth_hsrsup_addr[i + 1] << 8));
		if (ret)
			return ret;
	}

	/* Mirror HSR/PRP supervision to CPU port */
	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
	if (ret)
		return ret;

	if (fwdport >= 0)
		val |= BIT(fwdport);

	/* Allow must be set to prevent duplicate discard */
	ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
	if (ret)
		return ret;

	return 0;
}

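/* Per-port initialization: start with the port disabled, forwarding only
 * towards the CPU port, management mode selected on the CPU port, and a
 * (still disabled) BPDU trap installed on user ports.
 */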
static int xrs700x_port_setup(struct dsa_switch *ds, int port)
{
	bool cpu_port = dsa_is_cpu_port(ds, port);
	struct xrs700x *priv = ds->priv;
	unsigned int val = 0;
	int ret, i;

	xrs700x_port_stp_state_set(ds, port, BR_STATE_DISABLED);

	/* Disable forwarding to non-CPU ports */
	for (i = 0; i < ds->num_ports; i++) {
		if (!dsa_is_cpu_port(ds, i))
			val |= BIT(i);
	}

	/* 1 = Disable forwarding to the port */
	ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);
	if (ret)
		return ret;

	val = cpu_port ? XRS_PORT_MODE_MANAGEMENT : XRS_PORT_MODE_NORMAL;
	ret = regmap_fields_write(priv->ps_management, port, val);
	if (ret)
		return ret;

	if (!cpu_port) {
		ret = xrs700x_port_add_bpdu_ipf(ds, port);
		if (ret)
			return ret;
	}

	return 0;
}

static int xrs700x_setup(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;
	int ret, i;

	ret = xrs700x_reset(ds);
	if (ret)
		return ret;

	for (i = 0; i < ds->num_ports; i++) {
		ret = xrs700x_port_setup(ds, i);
		if (ret)
			return ret;
	}

	schedule_delayed_work(&priv->mib_work, XRS700X_MIB_INTERVAL);

	return 0;
}

static void xrs700x_teardown(struct dsa_switch *ds)
{
	struct xrs700x *priv = ds->priv;

	cancel_delayed_work_sync(&priv->mib_work);
}

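/* Port 0 supports 10/100 Mbps full duplex only; ports 1-3 additionally
 * support 1000 Mbps. Half duplex is not supported by the switch.
 */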
static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	switch (port) {
	case 0:
		break;
	case 1:
	case 2:
	case 3:
		phylink_set(mask, 1000baseT_Full);
		break;
	default:
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		dev_err(ds->dev, "Unsupported port: %i\n", port);
		return;
	}

	phylink_set_port_modes(mask);

	/* The switch only supports full duplex. */
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

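/* Program the resolved link speed into the port state register when the MAC
 * reports link up; unsupported speeds are silently ignored.
 */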
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode, phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct xrs700x *priv = ds->priv;
	unsigned int val;

	switch (speed) {
	case SPEED_1000:
		val = XRS_PORT_SPEED_1000;
		break;
	case SPEED_100:
		val = XRS_PORT_SPEED_100;
		break;
	case SPEED_10:
		val = XRS_PORT_SPEED_10;
		break;
	default:
		return;
	}

	regmap_fields_write(priv->ps_sel_speed, port, val);

	dev_dbg_ratelimited(priv->dev, "%s: port: %d mode: %u speed: %u\n",
			    __func__, port, mode, speed);
}

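/* Recompute the forwarding masks of all ports that are members of @bridge so
 * they forward only to each other and to the CPU port. On leave, additionally
 * restrict @port to forwarding towards the CPU port only.
 */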
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
				 struct net_device *bridge, bool join)
{
	unsigned int i, cpu_mask = 0, mask = 0;
	struct xrs700x *priv = ds->priv;
	int ret;

	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_is_cpu_port(ds, i))
			continue;

		cpu_mask |= BIT(i);

		if (dsa_to_port(ds, i)->bridge_dev == bridge)
			continue;

		mask |= BIT(i);
	}

	for (i = 0; i < ds->num_ports; i++) {
		if (dsa_to_port(ds, i)->bridge_dev != bridge)
			continue;

		/* 1 = Disable forwarding to the port */
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(i), mask);
		if (ret)
			return ret;
	}

	if (!join) {
		ret = regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port),
				   cpu_mask);
		if (ret)
			return ret;
	}

	return 0;
}

static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
			       struct net_device *bridge)
{
	return xrs700x_bridge_common(ds, port, bridge, true);
}

static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *bridge)
{
	xrs700x_bridge_common(ds, port, bridge, false);
}

static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
			    struct net_device *hsr)
{
	unsigned int val = XRS_HSR_CFG_HSR_PRP;
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int ret, i, hsr_pair[2];
	enum hsr_version ver;
	bool fwd = false;

	ret = hsr_get_version(hsr, &ver);
	if (ret)
		return ret;

	/* Only ports 1 and 2 can be HSR/PRP redundant ports. */
	if (port != 1 && port != 2)
		return -EOPNOTSUPP;

	if (ver == HSR_V1)
		val |= XRS_HSR_CFG_HSR;
	else if (ver == PRP_V1)
		val |= XRS_HSR_CFG_PRP;
	else
		return -EOPNOTSUPP;

	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	/* We can't enable redundancy on the switch until both
	 * redundant ports have signed up.
	 */
	if (!partner)
		return 0;

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index),
		     val | XRS_HSR_CFG_LANID_A);
	regmap_write(priv->regmap, XRS_HSR_CFG(port),
		     val | XRS_HSR_CFG_LANID_B);

	/* Clear bits for both redundant ports (HSR only) and the CPU port to
	 * enable forwarding.
	 */
	val = GENMASK(ds->num_ports - 1, 0);
	if (ver == HSR_V1) {
		val &= ~BIT(partner->index);
		val &= ~BIT(port);
		fwd = true;
	}
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Enable inbound policy which allows HSR/PRP supervision forwarding
	 * to the CPU port without discarding duplicates. Continue to
	 * forward to redundant ports when in HSR mode while discarding
	 * duplicates.
	 */
	ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
	if (ret)
		return ret;

	ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
	if (ret)
		return ret;

	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);

	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features |= XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}

static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
			     struct net_device *hsr)
{
	struct dsa_port *partner = NULL, *dp;
	struct xrs700x *priv = ds->priv;
	struct net_device *slave;
	int i, hsr_pair[2];
	unsigned int val;

	dsa_hsr_foreach_port(dp, ds, hsr) {
		if (dp->index != port) {
			partner = dp;
			break;
		}
	}

	if (!partner)
		return 0;

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_DISABLED);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_DISABLED);

	regmap_write(priv->regmap, XRS_HSR_CFG(partner->index), 0);
	regmap_write(priv->regmap, XRS_HSR_CFG(port), 0);

	/* Clear bit for the CPU port to enable forwarding. */
	val = GENMASK(ds->num_ports - 1, 0);
	val &= ~BIT(dsa_upstream_port(ds, port));
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
	regmap_write(priv->regmap, XRS_PORT_FWD_MASK(port), val);

	regmap_fields_write(priv->ps_forward, partner->index,
			    XRS_PORT_FORWARDING);
	regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);

	/* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
	 * which allows HSR/PRP supervision forwarding to the CPU port without
	 * discarding duplicates.
	 */
	regmap_update_bits(priv->regmap,
			   XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
	regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);

	hsr_pair[0] = port;
	hsr_pair[1] = partner->index;
	for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
		slave = dsa_to_port(ds, hsr_pair[i])->slave;
		slave->features &= ~XRS7000X_SUPPORTED_HSR_FEATURES;
	}

	return 0;
}

static const struct dsa_switch_ops xrs700x_ops = {
	.get_tag_protocol	= xrs700x_get_tag_protocol,
	.setup			= xrs700x_setup,
	.teardown		= xrs700x_teardown,
	.port_stp_state_set	= xrs700x_port_stp_state_set,
	.phylink_validate	= xrs700x_phylink_validate,
	.phylink_mac_link_up	= xrs700x_mac_link_up,
	.get_strings		= xrs700x_get_strings,
	.get_sset_count		= xrs700x_get_sset_count,
	.get_ethtool_stats	= xrs700x_get_ethtool_stats,
	.get_stats64		= xrs700x_get_stats64,
	.port_bridge_join	= xrs700x_bridge_join,
	.port_bridge_leave	= xrs700x_bridge_leave,
	.port_hsr_join		= xrs700x_hsr_join,
	.port_hsr_leave		= xrs700x_hsr_leave,
};

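/* Read the device ID register and match it against the expected ID from the
 * OF match data; on success the number of ports is taken from the match data.
 */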
static int xrs700x_detect(struct xrs700x *priv)
{
	const struct xrs700x_info *info;
	unsigned int id;
	int ret;

	ret = regmap_read(priv->regmap, XRS_DEV_ID0, &id);
	if (ret) {
		dev_err(priv->dev, "error %d while reading switch id.\n",
			ret);
		return ret;
	}

	info = of_device_get_match_data(priv->dev);
	if (!info)
		return -EINVAL;

	if (info->id == id) {
		priv->ds->num_ports = info->num_ports;
		dev_info(priv->dev, "%s detected.\n", info->name);
		return 0;
	}

	dev_err(priv->dev, "expected switch id 0x%x but found 0x%x.\n",
		info->id, id);

	return -ENODEV;
}

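/* Allocate the dsa_switch and driver private data on behalf of the bus
 * driver; @devpriv is the caller's bus-specific context and is stored in
 * priv->priv.
 */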
struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
{
	struct dsa_switch *ds;
	struct xrs700x *priv;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;

	priv = devm_kzalloc(base, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;

	INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);

	ds->ops = &xrs700x_ops;
	ds->priv = priv;
	priv->dev = base;

	priv->ds = ds;
	priv->priv = devpriv;

	return priv;
}
EXPORT_SYMBOL(xrs700x_switch_alloc);

static int xrs700x_alloc_port_mib(struct xrs700x *priv, int port)
{
	struct xrs700x_port *p = &priv->ports[port];

	p->mib_data = devm_kcalloc(priv->dev, ARRAY_SIZE(xrs700x_mibs),
				   sizeof(*p->mib_data), GFP_KERNEL);
	if (!p->mib_data)
		return -ENOMEM;

	mutex_init(&p->mib_mutex);
	u64_stats_init(&p->syncp);

	return 0;
}

int xrs700x_switch_register(struct xrs700x *priv)
{
	int ret;
	int i;

	ret = xrs700x_detect(priv);
	if (ret)
		return ret;

	ret = xrs700x_setup_regmap_range(priv);
	if (ret)
		return ret;

	priv->ports = devm_kcalloc(priv->dev, priv->ds->num_ports,
				   sizeof(*priv->ports), GFP_KERNEL);
	if (!priv->ports)
		return -ENOMEM;

	for (i = 0; i < priv->ds->num_ports; i++) {
		ret = xrs700x_alloc_port_mib(priv, i);
		if (ret)
			return ret;
	}

	return dsa_register_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_register);

void xrs700x_switch_remove(struct xrs700x *priv)
{
	dsa_unregister_switch(priv->ds);
}
EXPORT_SYMBOL(xrs700x_switch_remove);

MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
MODULE_LICENSE("GPL v2");