// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2020 NovaTech LLC
 * George McCollister <george.mccollister@gmail.com>
 */
8 #include <linux/if_bridge.h>
9 #include <linux/of_device.h>
10 #include <linux/netdev_features.h>
11 #include <linux/if_hsr.h>
13 #include "xrs700x_reg.h"
15 #define XRS700X_MIB_INTERVAL msecs_to_jiffies(3000)
17 #define XRS7000X_SUPPORTED_HSR_FEATURES \
18 (NETIF_F_HW_HSR_TAG_INS | NETIF_F_HW_HSR_TAG_RM | \
19 NETIF_F_HW_HSR_FWD | NETIF_F_HW_HSR_DUP)
21 #define XRS7003E_ID 0x100
22 #define XRS7003F_ID 0x101
23 #define XRS7004E_ID 0x200
24 #define XRS7004F_ID 0x201
26 const struct xrs700x_info xrs7003e_info
= {XRS7003E_ID
, "XRS7003E", 3};
27 EXPORT_SYMBOL(xrs7003e_info
);
29 const struct xrs700x_info xrs7003f_info
= {XRS7003F_ID
, "XRS7003F", 3};
30 EXPORT_SYMBOL(xrs7003f_info
);
32 const struct xrs700x_info xrs7004e_info
= {XRS7004E_ID
, "XRS7004E", 4};
33 EXPORT_SYMBOL(xrs7004e_info
);
35 const struct xrs700x_info xrs7004f_info
= {XRS7004F_ID
, "XRS7004F", 4};
36 EXPORT_SYMBOL(xrs7004f_info
);
38 struct xrs700x_regfield
{
40 struct regmap_field
**rmf
;
49 #define XRS700X_MIB_ETHTOOL_ONLY(o, n) {o, n, -1}
50 #define XRS700X_MIB(o, n, m) {o, n, offsetof(struct rtnl_link_stats64, m)}
52 static const struct xrs700x_mib xrs700x_mibs
[] = {
53 XRS700X_MIB(XRS_RX_GOOD_OCTETS_L
, "rx_good_octets", rx_bytes
),
54 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_BAD_OCTETS_L
, "rx_bad_octets"),
55 XRS700X_MIB(XRS_RX_UNICAST_L
, "rx_unicast", rx_packets
),
56 XRS700X_MIB(XRS_RX_BROADCAST_L
, "rx_broadcast", rx_packets
),
57 XRS700X_MIB(XRS_RX_MULTICAST_L
, "rx_multicast", multicast
),
58 XRS700X_MIB(XRS_RX_UNDERSIZE_L
, "rx_undersize", rx_length_errors
),
59 XRS700X_MIB(XRS_RX_FRAGMENTS_L
, "rx_fragments", rx_length_errors
),
60 XRS700X_MIB(XRS_RX_OVERSIZE_L
, "rx_oversize", rx_length_errors
),
61 XRS700X_MIB(XRS_RX_JABBER_L
, "rx_jabber", rx_length_errors
),
62 XRS700X_MIB(XRS_RX_ERR_L
, "rx_err", rx_errors
),
63 XRS700X_MIB(XRS_RX_CRC_L
, "rx_crc", rx_crc_errors
),
64 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_64_L
, "rx_64"),
65 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_65_127_L
, "rx_65_127"),
66 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_128_255_L
, "rx_128_255"),
67 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_256_511_L
, "rx_256_511"),
68 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_512_1023_L
, "rx_512_1023"),
69 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_1024_1536_L
, "rx_1024_1536"),
70 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_HSR_PRP_L
, "rx_hsr_prp"),
71 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_WRONGLAN_L
, "rx_wronglan"),
72 XRS700X_MIB_ETHTOOL_ONLY(XRS_RX_DUPLICATE_L
, "rx_duplicate"),
73 XRS700X_MIB(XRS_TX_OCTETS_L
, "tx_octets", tx_bytes
),
74 XRS700X_MIB(XRS_TX_UNICAST_L
, "tx_unicast", tx_packets
),
75 XRS700X_MIB(XRS_TX_BROADCAST_L
, "tx_broadcast", tx_packets
),
76 XRS700X_MIB(XRS_TX_MULTICAST_L
, "tx_multicast", tx_packets
),
77 XRS700X_MIB_ETHTOOL_ONLY(XRS_TX_HSR_PRP_L
, "tx_hsr_prp"),
78 XRS700X_MIB(XRS_PRIQ_DROP_L
, "priq_drop", tx_dropped
),
79 XRS700X_MIB(XRS_EARLY_DROP_L
, "early_drop", tx_dropped
),
82 static const u8 eth_hsrsup_addr
[ETH_ALEN
] = {
83 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};
85 static void xrs700x_get_strings(struct dsa_switch
*ds
, int port
,
86 u32 stringset
, u8
*data
)
90 if (stringset
!= ETH_SS_STATS
)
93 for (i
= 0; i
< ARRAY_SIZE(xrs700x_mibs
); i
++) {
94 strscpy(data
, xrs700x_mibs
[i
].name
, ETH_GSTRING_LEN
);
95 data
+= ETH_GSTRING_LEN
;
99 static int xrs700x_get_sset_count(struct dsa_switch
*ds
, int port
, int sset
)
101 if (sset
!= ETH_SS_STATS
)
104 return ARRAY_SIZE(xrs700x_mibs
);
107 static void xrs700x_read_port_counters(struct xrs700x
*priv
, int port
)
109 struct xrs700x_port
*p
= &priv
->ports
[port
];
110 struct rtnl_link_stats64 stats
;
113 memset(&stats
, 0, sizeof(stats
));
115 mutex_lock(&p
->mib_mutex
);
117 /* Capture counter values */
118 regmap_write(priv
->regmap
, XRS_CNT_CTRL(port
), 1);
120 for (i
= 0; i
< ARRAY_SIZE(xrs700x_mibs
); i
++) {
121 unsigned int high
= 0, low
= 0, reg
;
123 reg
= xrs700x_mibs
[i
].offset
+ XRS_PORT_OFFSET
* port
;
124 regmap_read(priv
->regmap
, reg
, &low
);
125 regmap_read(priv
->regmap
, reg
+ 2, &high
);
127 p
->mib_data
[i
] += (high
<< 16) | low
;
129 if (xrs700x_mibs
[i
].stats64_offset
>= 0) {
130 u8
*s
= (u8
*)&stats
+ xrs700x_mibs
[i
].stats64_offset
;
131 *(u64
*)s
+= p
->mib_data
[i
];
135 /* multicast must be added to rx_packets (which already includes
136 * unicast and broadcast)
138 stats
.rx_packets
+= stats
.multicast
;
140 u64_stats_update_begin(&p
->syncp
);
142 u64_stats_update_end(&p
->syncp
);
144 mutex_unlock(&p
->mib_mutex
);
147 static void xrs700x_mib_work(struct work_struct
*work
)
149 struct xrs700x
*priv
= container_of(work
, struct xrs700x
,
153 for (i
= 0; i
< priv
->ds
->num_ports
; i
++)
154 xrs700x_read_port_counters(priv
, i
);
156 schedule_delayed_work(&priv
->mib_work
, XRS700X_MIB_INTERVAL
);
159 static void xrs700x_get_ethtool_stats(struct dsa_switch
*ds
, int port
,
162 struct xrs700x
*priv
= ds
->priv
;
163 struct xrs700x_port
*p
= &priv
->ports
[port
];
165 xrs700x_read_port_counters(priv
, port
);
167 mutex_lock(&p
->mib_mutex
);
168 memcpy(data
, p
->mib_data
, sizeof(*data
) * ARRAY_SIZE(xrs700x_mibs
));
169 mutex_unlock(&p
->mib_mutex
);
172 static void xrs700x_get_stats64(struct dsa_switch
*ds
, int port
,
173 struct rtnl_link_stats64
*s
)
175 struct xrs700x
*priv
= ds
->priv
;
176 struct xrs700x_port
*p
= &priv
->ports
[port
];
180 start
= u64_stats_fetch_begin(&p
->syncp
);
182 } while (u64_stats_fetch_retry(&p
->syncp
, start
));
185 static int xrs700x_setup_regmap_range(struct xrs700x
*priv
)
187 struct xrs700x_regfield regfields
[] = {
189 .rf
= REG_FIELD_ID(XRS_PORT_STATE(0), 0, 1,
192 .rmf
= &priv
->ps_forward
195 .rf
= REG_FIELD_ID(XRS_PORT_STATE(0), 2, 3,
198 .rmf
= &priv
->ps_management
201 .rf
= REG_FIELD_ID(XRS_PORT_STATE(0), 4, 9,
204 .rmf
= &priv
->ps_sel_speed
207 .rf
= REG_FIELD_ID(XRS_PORT_STATE(0), 10, 11,
210 .rmf
= &priv
->ps_cur_speed
215 for (; i
< ARRAY_SIZE(regfields
); i
++) {
216 *regfields
[i
].rmf
= devm_regmap_field_alloc(priv
->dev
,
219 if (IS_ERR(*regfields
[i
].rmf
))
220 return PTR_ERR(*regfields
[i
].rmf
);
226 static enum dsa_tag_protocol
xrs700x_get_tag_protocol(struct dsa_switch
*ds
,
228 enum dsa_tag_protocol m
)
230 return DSA_TAG_PROTO_XRS700X
;
233 static int xrs700x_reset(struct dsa_switch
*ds
)
235 struct xrs700x
*priv
= ds
->priv
;
239 ret
= regmap_write(priv
->regmap
, XRS_GENERAL
, XRS_GENERAL_RESET
);
243 ret
= regmap_read_poll_timeout(priv
->regmap
, XRS_GENERAL
,
244 val
, !(val
& XRS_GENERAL_RESET
),
248 dev_err_ratelimited(priv
->dev
, "error resetting switch: %d\n",
255 static void xrs700x_port_stp_state_set(struct dsa_switch
*ds
, int port
,
258 struct xrs700x
*priv
= ds
->priv
;
259 unsigned int bpdus
= 1;
263 case BR_STATE_DISABLED
:
266 case BR_STATE_BLOCKING
:
267 case BR_STATE_LISTENING
:
268 val
= XRS_PORT_DISABLED
;
270 case BR_STATE_LEARNING
:
271 val
= XRS_PORT_LEARNING
;
273 case BR_STATE_FORWARDING
:
274 val
= XRS_PORT_FORWARDING
;
277 dev_err(ds
->dev
, "invalid STP state: %d\n", state
);
281 regmap_fields_write(priv
->ps_forward
, port
, val
);
283 /* Enable/disable inbound policy added by xrs700x_port_add_bpdu_ipf()
284 * which allows BPDU forwarding to the CPU port when the front facing
285 * port is in disabled/learning state.
287 regmap_update_bits(priv
->regmap
, XRS_ETH_ADDR_CFG(port
, 0), 1, bpdus
);
289 dev_dbg_ratelimited(priv
->dev
, "%s - port: %d, state: %u, val: 0x%x\n",
290 __func__
, port
, state
, val
);
293 /* Add an inbound policy filter which matches the BPDU destination MAC
294 * and forwards to the CPU port. Leave the policy disabled, it will be
297 static int xrs700x_port_add_bpdu_ipf(struct dsa_switch
*ds
, int port
)
299 struct xrs700x
*priv
= ds
->priv
;
300 unsigned int val
= 0;
304 /* Compare all 48 bits of the destination MAC address. */
305 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_CFG(port
, 0), 48 << 2);
309 /* match BPDU destination 01:80:c2:00:00:00 */
310 for (i
= 0; i
< sizeof(eth_stp_addr
); i
+= 2) {
311 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_0(port
, 0) + i
,
313 (eth_stp_addr
[i
+ 1] << 8));
318 /* Mirror BPDU to CPU port */
319 for (i
= 0; i
< ds
->num_ports
; i
++) {
320 if (dsa_is_cpu_port(ds
, i
))
324 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_FWD_MIRROR(port
, 0), val
);
328 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_FWD_ALLOW(port
, 0), 0);
335 /* Add an inbound policy filter which matches the HSR/PRP supervision MAC
336 * range and forwards to the CPU port without discarding duplicates.
337 * This is required to correctly populate the HSR/PRP node_table.
338 * Leave the policy disabled, it will be enabled as needed.
340 static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch
*ds
, int port
,
343 struct xrs700x
*priv
= ds
->priv
;
344 unsigned int val
= 0;
348 /* Compare 40 bits of the destination MAC address. */
349 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_CFG(port
, 1), 40 << 2);
353 /* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
354 for (i
= 0; i
< sizeof(eth_hsrsup_addr
); i
+= 2) {
355 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_0(port
, 1) + i
,
357 (eth_hsrsup_addr
[i
+ 1] << 8));
362 /* Mirror HSR/PRP supervision to CPU port */
363 for (i
= 0; i
< ds
->num_ports
; i
++) {
364 if (dsa_is_cpu_port(ds
, i
))
368 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_FWD_MIRROR(port
, 1), val
);
375 /* Allow must be set prevent duplicate discard */
376 ret
= regmap_write(priv
->regmap
, XRS_ETH_ADDR_FWD_ALLOW(port
, 1), val
);
383 static int xrs700x_port_setup(struct dsa_switch
*ds
, int port
)
385 bool cpu_port
= dsa_is_cpu_port(ds
, port
);
386 struct xrs700x
*priv
= ds
->priv
;
387 unsigned int val
= 0;
390 xrs700x_port_stp_state_set(ds
, port
, BR_STATE_DISABLED
);
392 /* Disable forwarding to non-CPU ports */
393 for (i
= 0; i
< ds
->num_ports
; i
++) {
394 if (!dsa_is_cpu_port(ds
, i
))
398 /* 1 = Disable forwarding to the port */
399 ret
= regmap_write(priv
->regmap
, XRS_PORT_FWD_MASK(port
), val
);
403 val
= cpu_port
? XRS_PORT_MODE_MANAGEMENT
: XRS_PORT_MODE_NORMAL
;
404 ret
= regmap_fields_write(priv
->ps_management
, port
, val
);
409 ret
= xrs700x_port_add_bpdu_ipf(ds
, port
);
417 static int xrs700x_setup(struct dsa_switch
*ds
)
419 struct xrs700x
*priv
= ds
->priv
;
422 ret
= xrs700x_reset(ds
);
426 for (i
= 0; i
< ds
->num_ports
; i
++) {
427 ret
= xrs700x_port_setup(ds
, i
);
432 schedule_delayed_work(&priv
->mib_work
, XRS700X_MIB_INTERVAL
);
437 static void xrs700x_teardown(struct dsa_switch
*ds
)
439 struct xrs700x
*priv
= ds
->priv
;
441 cancel_delayed_work_sync(&priv
->mib_work
);
444 static void xrs700x_phylink_validate(struct dsa_switch
*ds
, int port
,
445 unsigned long *supported
,
446 struct phylink_link_state
*state
)
448 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask
) = { 0, };
456 phylink_set(mask
, 1000baseT_Full
);
459 bitmap_zero(supported
, __ETHTOOL_LINK_MODE_MASK_NBITS
);
460 dev_err(ds
->dev
, "Unsupported port: %i\n", port
);
464 phylink_set_port_modes(mask
);
466 /* The switch only supports full duplex. */
467 phylink_set(mask
, 10baseT_Full
);
468 phylink_set(mask
, 100baseT_Full
);
470 bitmap_and(supported
, supported
, mask
,
471 __ETHTOOL_LINK_MODE_MASK_NBITS
);
472 bitmap_and(state
->advertising
, state
->advertising
, mask
,
473 __ETHTOOL_LINK_MODE_MASK_NBITS
);
476 static void xrs700x_mac_link_up(struct dsa_switch
*ds
, int port
,
477 unsigned int mode
, phy_interface_t interface
,
478 struct phy_device
*phydev
,
479 int speed
, int duplex
,
480 bool tx_pause
, bool rx_pause
)
482 struct xrs700x
*priv
= ds
->priv
;
487 val
= XRS_PORT_SPEED_1000
;
490 val
= XRS_PORT_SPEED_100
;
493 val
= XRS_PORT_SPEED_10
;
499 regmap_fields_write(priv
->ps_sel_speed
, port
, val
);
501 dev_dbg_ratelimited(priv
->dev
, "%s: port: %d mode: %u speed: %u\n",
502 __func__
, port
, mode
, speed
);
505 static int xrs700x_bridge_common(struct dsa_switch
*ds
, int port
,
506 struct net_device
*bridge
, bool join
)
508 unsigned int i
, cpu_mask
= 0, mask
= 0;
509 struct xrs700x
*priv
= ds
->priv
;
512 for (i
= 0; i
< ds
->num_ports
; i
++) {
513 if (dsa_is_cpu_port(ds
, i
))
518 if (dsa_to_port(ds
, i
)->bridge_dev
== bridge
)
524 for (i
= 0; i
< ds
->num_ports
; i
++) {
525 if (dsa_to_port(ds
, i
)->bridge_dev
!= bridge
)
528 /* 1 = Disable forwarding to the port */
529 ret
= regmap_write(priv
->regmap
, XRS_PORT_FWD_MASK(i
), mask
);
535 ret
= regmap_write(priv
->regmap
, XRS_PORT_FWD_MASK(port
),
544 static int xrs700x_bridge_join(struct dsa_switch
*ds
, int port
,
545 struct net_device
*bridge
)
547 return xrs700x_bridge_common(ds
, port
, bridge
, true);
550 static void xrs700x_bridge_leave(struct dsa_switch
*ds
, int port
,
551 struct net_device
*bridge
)
553 xrs700x_bridge_common(ds
, port
, bridge
, false);
556 static int xrs700x_hsr_join(struct dsa_switch
*ds
, int port
,
557 struct net_device
*hsr
)
559 unsigned int val
= XRS_HSR_CFG_HSR_PRP
;
560 struct dsa_port
*partner
= NULL
, *dp
;
561 struct xrs700x
*priv
= ds
->priv
;
562 struct net_device
*slave
;
563 int ret
, i
, hsr_pair
[2];
564 enum hsr_version ver
;
567 ret
= hsr_get_version(hsr
, &ver
);
571 /* Only ports 1 and 2 can be HSR/PRP redundant ports. */
572 if (port
!= 1 && port
!= 2)
576 val
|= XRS_HSR_CFG_HSR
;
577 else if (ver
== PRP_V1
)
578 val
|= XRS_HSR_CFG_PRP
;
582 dsa_hsr_foreach_port(dp
, ds
, hsr
) {
583 if (dp
->index
!= port
) {
589 /* We can't enable redundancy on the switch until both
590 * redundant ports have signed up.
595 regmap_fields_write(priv
->ps_forward
, partner
->index
,
597 regmap_fields_write(priv
->ps_forward
, port
, XRS_PORT_DISABLED
);
599 regmap_write(priv
->regmap
, XRS_HSR_CFG(partner
->index
),
600 val
| XRS_HSR_CFG_LANID_A
);
601 regmap_write(priv
->regmap
, XRS_HSR_CFG(port
),
602 val
| XRS_HSR_CFG_LANID_B
);
604 /* Clear bits for both redundant ports (HSR only) and the CPU port to
607 val
= GENMASK(ds
->num_ports
- 1, 0);
609 val
&= ~BIT(partner
->index
);
613 val
&= ~BIT(dsa_upstream_port(ds
, port
));
614 regmap_write(priv
->regmap
, XRS_PORT_FWD_MASK(partner
->index
), val
);
615 regmap_write(priv
->regmap
, XRS_PORT_FWD_MASK(port
), val
);
617 regmap_fields_write(priv
->ps_forward
, partner
->index
,
618 XRS_PORT_FORWARDING
);
619 regmap_fields_write(priv
->ps_forward
, port
, XRS_PORT_FORWARDING
);
621 /* Enable inbound policy which allows HSR/PRP supervision forwarding
622 * to the CPU port without discarding duplicates. Continue to
623 * forward to redundant ports when in HSR mode while discarding
626 ret
= xrs700x_port_add_hsrsup_ipf(ds
, partner
->index
, fwd
? port
: -1);
630 ret
= xrs700x_port_add_hsrsup_ipf(ds
, port
, fwd
? partner
->index
: -1);
634 regmap_update_bits(priv
->regmap
,
635 XRS_ETH_ADDR_CFG(partner
->index
, 1), 1, 1);
636 regmap_update_bits(priv
->regmap
, XRS_ETH_ADDR_CFG(port
, 1), 1, 1);
639 hsr_pair
[1] = partner
->index
;
640 for (i
= 0; i
< ARRAY_SIZE(hsr_pair
); i
++) {
641 slave
= dsa_to_port(ds
, hsr_pair
[i
])->slave
;
642 slave
->features
|= XRS7000X_SUPPORTED_HSR_FEATURES
;
648 static int xrs700x_hsr_leave(struct dsa_switch
*ds
, int port
,
649 struct net_device
*hsr
)
651 struct dsa_port
*partner
= NULL
, *dp
;
652 struct xrs700x
*priv
= ds
->priv
;
653 struct net_device
*slave
;
657 dsa_hsr_foreach_port(dp
, ds
, hsr
) {
658 if (dp
->index
!= port
) {
667 regmap_fields_write(priv
->ps_forward
, partner
->index
,
669 regmap_fields_write(priv
->ps_forward
, port
, XRS_PORT_DISABLED
);
671 regmap_write(priv
->regmap
, XRS_HSR_CFG(partner
->index
), 0);
672 regmap_write(priv
->regmap
, XRS_HSR_CFG(port
), 0);
674 /* Clear bit for the CPU port to enable forwarding. */
675 val
= GENMASK(ds
->num_ports
- 1, 0);
676 val
&= ~BIT(dsa_upstream_port(ds
, port
));
677 regmap_write(priv
->regmap
, XRS_PORT_FWD_MASK(partner
->index
), val
);
678 regmap_write(priv
->regmap
, XRS_PORT_FWD_MASK(port
), val
);
680 regmap_fields_write(priv
->ps_forward
, partner
->index
,
681 XRS_PORT_FORWARDING
);
682 regmap_fields_write(priv
->ps_forward
, port
, XRS_PORT_FORWARDING
);
684 /* Disable inbound policy added by xrs700x_port_add_hsrsup_ipf()
685 * which allows HSR/PRP supervision forwarding to the CPU port without
686 * discarding duplicates.
688 regmap_update_bits(priv
->regmap
,
689 XRS_ETH_ADDR_CFG(partner
->index
, 1), 1, 0);
690 regmap_update_bits(priv
->regmap
, XRS_ETH_ADDR_CFG(port
, 1), 1, 0);
693 hsr_pair
[1] = partner
->index
;
694 for (i
= 0; i
< ARRAY_SIZE(hsr_pair
); i
++) {
695 slave
= dsa_to_port(ds
, hsr_pair
[i
])->slave
;
696 slave
->features
&= ~XRS7000X_SUPPORTED_HSR_FEATURES
;
702 static const struct dsa_switch_ops xrs700x_ops
= {
703 .get_tag_protocol
= xrs700x_get_tag_protocol
,
704 .setup
= xrs700x_setup
,
705 .teardown
= xrs700x_teardown
,
706 .port_stp_state_set
= xrs700x_port_stp_state_set
,
707 .phylink_validate
= xrs700x_phylink_validate
,
708 .phylink_mac_link_up
= xrs700x_mac_link_up
,
709 .get_strings
= xrs700x_get_strings
,
710 .get_sset_count
= xrs700x_get_sset_count
,
711 .get_ethtool_stats
= xrs700x_get_ethtool_stats
,
712 .get_stats64
= xrs700x_get_stats64
,
713 .port_bridge_join
= xrs700x_bridge_join
,
714 .port_bridge_leave
= xrs700x_bridge_leave
,
715 .port_hsr_join
= xrs700x_hsr_join
,
716 .port_hsr_leave
= xrs700x_hsr_leave
,
719 static int xrs700x_detect(struct xrs700x
*priv
)
721 const struct xrs700x_info
*info
;
725 ret
= regmap_read(priv
->regmap
, XRS_DEV_ID0
, &id
);
727 dev_err(priv
->dev
, "error %d while reading switch id.\n",
732 info
= of_device_get_match_data(priv
->dev
);
736 if (info
->id
== id
) {
737 priv
->ds
->num_ports
= info
->num_ports
;
738 dev_info(priv
->dev
, "%s detected.\n", info
->name
);
742 dev_err(priv
->dev
, "expected switch id 0x%x but found 0x%x.\n",
748 struct xrs700x
*xrs700x_switch_alloc(struct device
*base
, void *devpriv
)
750 struct dsa_switch
*ds
;
751 struct xrs700x
*priv
;
753 ds
= devm_kzalloc(base
, sizeof(*ds
), GFP_KERNEL
);
759 priv
= devm_kzalloc(base
, sizeof(*priv
), GFP_KERNEL
);
763 INIT_DELAYED_WORK(&priv
->mib_work
, xrs700x_mib_work
);
765 ds
->ops
= &xrs700x_ops
;
770 priv
->priv
= devpriv
;
774 EXPORT_SYMBOL(xrs700x_switch_alloc
);
776 static int xrs700x_alloc_port_mib(struct xrs700x
*priv
, int port
)
778 struct xrs700x_port
*p
= &priv
->ports
[port
];
780 p
->mib_data
= devm_kcalloc(priv
->dev
, ARRAY_SIZE(xrs700x_mibs
),
781 sizeof(*p
->mib_data
), GFP_KERNEL
);
785 mutex_init(&p
->mib_mutex
);
786 u64_stats_init(&p
->syncp
);
791 int xrs700x_switch_register(struct xrs700x
*priv
)
796 ret
= xrs700x_detect(priv
);
800 ret
= xrs700x_setup_regmap_range(priv
);
804 priv
->ports
= devm_kcalloc(priv
->dev
, priv
->ds
->num_ports
,
805 sizeof(*priv
->ports
), GFP_KERNEL
);
809 for (i
= 0; i
< priv
->ds
->num_ports
; i
++) {
810 ret
= xrs700x_alloc_port_mib(priv
, i
);
815 return dsa_register_switch(priv
->ds
);
817 EXPORT_SYMBOL(xrs700x_switch_register
);
819 void xrs700x_switch_remove(struct xrs700x
*priv
)
821 dsa_unregister_switch(priv
->ds
);
823 EXPORT_SYMBOL(xrs700x_switch_remove
);
825 MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
826 MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
827 MODULE_LICENSE("GPL v2");