// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1
28 static u16
dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv
*port_priv
)
30 return port_priv
->fdb
->fdb_id
;
33 static struct dpaa2_switch_fdb
*dpaa2_switch_fdb_get_unused(struct ethsw_core
*ethsw
)
37 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++)
38 if (!ethsw
->fdbs
[i
].in_use
)
39 return ðsw
->fdbs
[i
];
43 static u16
dpaa2_switch_port_set_fdb(struct ethsw_port_priv
*port_priv
,
44 struct net_device
*bridge_dev
)
46 struct ethsw_port_priv
*other_port_priv
= NULL
;
47 struct dpaa2_switch_fdb
*fdb
;
48 struct net_device
*other_dev
;
49 struct list_head
*iter
;
51 /* If we leave a bridge (bridge_dev is NULL), find an unused
55 fdb
= dpaa2_switch_fdb_get_unused(port_priv
->ethsw_data
);
57 /* If there is no unused FDB, we must be the last port that
58 * leaves the last bridge, all the others are standalone. We
59 * can just keep the FDB that we already have.
63 port_priv
->fdb
->bridge_dev
= NULL
;
68 port_priv
->fdb
->in_use
= true;
69 port_priv
->fdb
->bridge_dev
= NULL
;
73 /* The below call to netdev_for_each_lower_dev() demands the RTNL lock
74 * being held. Assert on it so that it's easier to catch new code
75 * paths that reach this point without the RTNL lock.
79 /* If part of a bridge, use the FDB of the first dpaa2 switch interface
80 * to be present in that bridge
82 netdev_for_each_lower_dev(bridge_dev
, other_dev
, iter
) {
83 if (!dpaa2_switch_port_dev_check(other_dev
))
86 if (other_dev
== port_priv
->netdev
)
89 other_port_priv
= netdev_priv(other_dev
);
93 /* The current port is about to change its FDB to the one used by the
94 * first port that joined the bridge.
96 if (other_port_priv
) {
97 /* The previous FDB is about to become unused, since the
98 * interface is no longer standalone.
100 port_priv
->fdb
->in_use
= false;
101 port_priv
->fdb
->bridge_dev
= NULL
;
103 /* Get a reference to the new FDB */
104 port_priv
->fdb
= other_port_priv
->fdb
;
107 /* Keep track of the new upper bridge device */
108 port_priv
->fdb
->bridge_dev
= bridge_dev
;
113 static void *dpaa2_iova_to_virt(struct iommu_domain
*domain
,
114 dma_addr_t iova_addr
)
116 phys_addr_t phys_addr
;
118 phys_addr
= domain
? iommu_iova_to_phys(domain
, iova_addr
) : iova_addr
;
120 return phys_to_virt(phys_addr
);
123 static int dpaa2_switch_add_vlan(struct ethsw_port_priv
*port_priv
, u16 vid
)
125 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
126 struct dpsw_vlan_cfg vcfg
= {0};
129 vcfg
.fdb_id
= dpaa2_switch_port_get_fdb_id(port_priv
);
130 err
= dpsw_vlan_add(ethsw
->mc_io
, 0,
131 ethsw
->dpsw_handle
, vid
, &vcfg
);
133 dev_err(ethsw
->dev
, "dpsw_vlan_add err %d\n", err
);
136 ethsw
->vlans
[vid
] = ETHSW_VLAN_MEMBER
;
141 static bool dpaa2_switch_port_is_up(struct ethsw_port_priv
*port_priv
)
143 struct net_device
*netdev
= port_priv
->netdev
;
144 struct dpsw_link_state state
;
147 err
= dpsw_if_get_link_state(port_priv
->ethsw_data
->mc_io
, 0,
148 port_priv
->ethsw_data
->dpsw_handle
,
149 port_priv
->idx
, &state
);
151 netdev_err(netdev
, "dpsw_if_get_link_state() err %d\n", err
);
155 WARN_ONCE(state
.up
> 1, "Garbage read into link_state");
157 return state
.up
? true : false;
160 static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv
*port_priv
, u16 pvid
)
162 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
163 struct net_device
*netdev
= port_priv
->netdev
;
164 struct dpsw_tci_cfg tci_cfg
= { 0 };
168 err
= dpsw_if_get_tci(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
169 port_priv
->idx
, &tci_cfg
);
171 netdev_err(netdev
, "dpsw_if_get_tci err %d\n", err
);
175 tci_cfg
.vlan_id
= pvid
;
177 /* Interface needs to be down to change PVID */
178 up
= dpaa2_switch_port_is_up(port_priv
);
180 err
= dpsw_if_disable(ethsw
->mc_io
, 0,
184 netdev_err(netdev
, "dpsw_if_disable err %d\n", err
);
189 err
= dpsw_if_set_tci(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
190 port_priv
->idx
, &tci_cfg
);
192 netdev_err(netdev
, "dpsw_if_set_tci err %d\n", err
);
196 /* Delete previous PVID info and mark the new one */
197 port_priv
->vlans
[port_priv
->pvid
] &= ~ETHSW_VLAN_PVID
;
198 port_priv
->vlans
[pvid
] |= ETHSW_VLAN_PVID
;
199 port_priv
->pvid
= pvid
;
203 ret
= dpsw_if_enable(ethsw
->mc_io
, 0,
207 netdev_err(netdev
, "dpsw_if_enable err %d\n", ret
);
215 static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv
*port_priv
,
218 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
219 struct net_device
*netdev
= port_priv
->netdev
;
220 struct dpsw_vlan_if_cfg vcfg
= {0};
223 if (port_priv
->vlans
[vid
]) {
224 netdev_warn(netdev
, "VLAN %d already configured\n", vid
);
228 /* If hit, this VLAN rule will lead the packet into the FDB table
229 * specified in the vlan configuration below
232 vcfg
.if_id
[0] = port_priv
->idx
;
233 vcfg
.fdb_id
= dpaa2_switch_port_get_fdb_id(port_priv
);
234 vcfg
.options
|= DPSW_VLAN_ADD_IF_OPT_FDB_ID
;
235 err
= dpsw_vlan_add_if(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, vid
, &vcfg
);
237 netdev_err(netdev
, "dpsw_vlan_add_if err %d\n", err
);
241 port_priv
->vlans
[vid
] = ETHSW_VLAN_MEMBER
;
243 if (flags
& BRIDGE_VLAN_INFO_UNTAGGED
) {
244 err
= dpsw_vlan_add_if_untagged(ethsw
->mc_io
, 0,
249 "dpsw_vlan_add_if_untagged err %d\n", err
);
252 port_priv
->vlans
[vid
] |= ETHSW_VLAN_UNTAGGED
;
255 if (flags
& BRIDGE_VLAN_INFO_PVID
) {
256 err
= dpaa2_switch_port_set_pvid(port_priv
, vid
);
264 static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv
*port_priv
, u8 state
)
266 struct dpsw_stp_cfg stp_cfg
= {
272 if (!netif_running(port_priv
->netdev
) || state
== port_priv
->stp_state
)
273 return 0; /* Nothing to do */
275 for (vid
= 0; vid
<= VLAN_VID_MASK
; vid
++) {
276 if (port_priv
->vlans
[vid
] & ETHSW_VLAN_MEMBER
) {
277 stp_cfg
.vlan_id
= vid
;
278 err
= dpsw_if_set_stp(port_priv
->ethsw_data
->mc_io
, 0,
279 port_priv
->ethsw_data
->dpsw_handle
,
280 port_priv
->idx
, &stp_cfg
);
282 netdev_err(port_priv
->netdev
,
283 "dpsw_if_set_stp err %d\n", err
);
289 port_priv
->stp_state
= state
;
294 static int dpaa2_switch_dellink(struct ethsw_core
*ethsw
, u16 vid
)
296 struct ethsw_port_priv
*ppriv_local
= NULL
;
299 if (!ethsw
->vlans
[vid
])
302 err
= dpsw_vlan_remove(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, vid
);
304 dev_err(ethsw
->dev
, "dpsw_vlan_remove err %d\n", err
);
307 ethsw
->vlans
[vid
] = 0;
309 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++) {
310 ppriv_local
= ethsw
->ports
[i
];
311 ppriv_local
->vlans
[vid
] = 0;
317 static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv
*port_priv
,
318 const unsigned char *addr
)
320 struct dpsw_fdb_unicast_cfg entry
= {0};
324 entry
.if_egress
= port_priv
->idx
;
325 entry
.type
= DPSW_FDB_ENTRY_STATIC
;
326 ether_addr_copy(entry
.mac_addr
, addr
);
328 fdb_id
= dpaa2_switch_port_get_fdb_id(port_priv
);
329 err
= dpsw_fdb_add_unicast(port_priv
->ethsw_data
->mc_io
, 0,
330 port_priv
->ethsw_data
->dpsw_handle
,
333 netdev_err(port_priv
->netdev
,
334 "dpsw_fdb_add_unicast err %d\n", err
);
338 static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv
*port_priv
,
339 const unsigned char *addr
)
341 struct dpsw_fdb_unicast_cfg entry
= {0};
345 entry
.if_egress
= port_priv
->idx
;
346 entry
.type
= DPSW_FDB_ENTRY_STATIC
;
347 ether_addr_copy(entry
.mac_addr
, addr
);
349 fdb_id
= dpaa2_switch_port_get_fdb_id(port_priv
);
350 err
= dpsw_fdb_remove_unicast(port_priv
->ethsw_data
->mc_io
, 0,
351 port_priv
->ethsw_data
->dpsw_handle
,
353 /* Silently discard error for calling multiple times the del command */
354 if (err
&& err
!= -ENXIO
)
355 netdev_err(port_priv
->netdev
,
356 "dpsw_fdb_remove_unicast err %d\n", err
);
360 static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv
*port_priv
,
361 const unsigned char *addr
)
363 struct dpsw_fdb_multicast_cfg entry
= {0};
367 ether_addr_copy(entry
.mac_addr
, addr
);
368 entry
.type
= DPSW_FDB_ENTRY_STATIC
;
370 entry
.if_id
[0] = port_priv
->idx
;
372 fdb_id
= dpaa2_switch_port_get_fdb_id(port_priv
);
373 err
= dpsw_fdb_add_multicast(port_priv
->ethsw_data
->mc_io
, 0,
374 port_priv
->ethsw_data
->dpsw_handle
,
376 /* Silently discard error for calling multiple times the add command */
377 if (err
&& err
!= -ENXIO
)
378 netdev_err(port_priv
->netdev
, "dpsw_fdb_add_multicast err %d\n",
383 static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv
*port_priv
,
384 const unsigned char *addr
)
386 struct dpsw_fdb_multicast_cfg entry
= {0};
390 ether_addr_copy(entry
.mac_addr
, addr
);
391 entry
.type
= DPSW_FDB_ENTRY_STATIC
;
393 entry
.if_id
[0] = port_priv
->idx
;
395 fdb_id
= dpaa2_switch_port_get_fdb_id(port_priv
);
396 err
= dpsw_fdb_remove_multicast(port_priv
->ethsw_data
->mc_io
, 0,
397 port_priv
->ethsw_data
->dpsw_handle
,
399 /* Silently discard error for calling multiple times the del command */
400 if (err
&& err
!= -ENAVAIL
)
401 netdev_err(port_priv
->netdev
,
402 "dpsw_fdb_remove_multicast err %d\n", err
);
406 static void dpaa2_switch_port_get_stats(struct net_device
*netdev
,
407 struct rtnl_link_stats64
*stats
)
409 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
413 err
= dpsw_if_get_counter(port_priv
->ethsw_data
->mc_io
, 0,
414 port_priv
->ethsw_data
->dpsw_handle
,
416 DPSW_CNT_ING_FRAME
, &stats
->rx_packets
);
420 err
= dpsw_if_get_counter(port_priv
->ethsw_data
->mc_io
, 0,
421 port_priv
->ethsw_data
->dpsw_handle
,
423 DPSW_CNT_EGR_FRAME
, &stats
->tx_packets
);
427 err
= dpsw_if_get_counter(port_priv
->ethsw_data
->mc_io
, 0,
428 port_priv
->ethsw_data
->dpsw_handle
,
430 DPSW_CNT_ING_BYTE
, &stats
->rx_bytes
);
434 err
= dpsw_if_get_counter(port_priv
->ethsw_data
->mc_io
, 0,
435 port_priv
->ethsw_data
->dpsw_handle
,
437 DPSW_CNT_EGR_BYTE
, &stats
->tx_bytes
);
441 err
= dpsw_if_get_counter(port_priv
->ethsw_data
->mc_io
, 0,
442 port_priv
->ethsw_data
->dpsw_handle
,
444 DPSW_CNT_ING_FRAME_DISCARD
,
449 err
= dpsw_if_get_counter(port_priv
->ethsw_data
->mc_io
, 0,
450 port_priv
->ethsw_data
->dpsw_handle
,
452 DPSW_CNT_ING_FLTR_FRAME
,
456 stats
->rx_dropped
+= tmp
;
458 err
= dpsw_if_get_counter(port_priv
->ethsw_data
->mc_io
, 0,
459 port_priv
->ethsw_data
->dpsw_handle
,
461 DPSW_CNT_EGR_FRAME_DISCARD
,
469 netdev_err(netdev
, "dpsw_if_get_counter err %d\n", err
);
472 static bool dpaa2_switch_port_has_offload_stats(const struct net_device
*netdev
,
475 return (attr_id
== IFLA_OFFLOAD_XSTATS_CPU_HIT
);
478 static int dpaa2_switch_port_get_offload_stats(int attr_id
,
479 const struct net_device
*netdev
,
483 case IFLA_OFFLOAD_XSTATS_CPU_HIT
:
484 dpaa2_switch_port_get_stats((struct net_device
*)netdev
, sp
);
491 static int dpaa2_switch_port_change_mtu(struct net_device
*netdev
, int mtu
)
493 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
496 err
= dpsw_if_set_max_frame_length(port_priv
->ethsw_data
->mc_io
,
498 port_priv
->ethsw_data
->dpsw_handle
,
500 (u16
)ETHSW_L2_MAX_FRM(mtu
));
503 "dpsw_if_set_max_frame_length() err %d\n", err
);
511 static int dpaa2_switch_port_carrier_state_sync(struct net_device
*netdev
)
513 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
514 struct dpsw_link_state state
;
517 /* Interrupts are received even though no one issued an 'ifconfig up'
518 * on the switch interface. Ignore these link state update interrupts
520 if (!netif_running(netdev
))
523 err
= dpsw_if_get_link_state(port_priv
->ethsw_data
->mc_io
, 0,
524 port_priv
->ethsw_data
->dpsw_handle
,
525 port_priv
->idx
, &state
);
527 netdev_err(netdev
, "dpsw_if_get_link_state() err %d\n", err
);
531 WARN_ONCE(state
.up
> 1, "Garbage read into link_state");
533 if (state
.up
!= port_priv
->link_state
) {
535 netif_carrier_on(netdev
);
536 netif_tx_start_all_queues(netdev
);
538 netif_carrier_off(netdev
);
539 netif_tx_stop_all_queues(netdev
);
541 port_priv
->link_state
= state
.up
;
547 /* Manage all NAPI instances for the control interface.
549 * We only have one RX queue and one Tx Conf queue for all
550 * switch ports. Therefore, we only need to enable the NAPI instance once, the
551 * first time one of the switch ports runs .dev_open().
554 static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core
*ethsw
)
558 /* Access to the ethsw->napi_users relies on the RTNL lock */
561 /* a new interface is using the NAPI instance */
564 /* if there is already a user of the instance, return */
565 if (ethsw
->napi_users
> 1)
568 for (i
= 0; i
< DPAA2_SWITCH_RX_NUM_FQS
; i
++)
569 napi_enable(ðsw
->fq
[i
].napi
);
572 static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core
*ethsw
)
576 /* Access to the ethsw->napi_users relies on the RTNL lock */
579 /* If we are not the last interface using the NAPI, return */
581 if (ethsw
->napi_users
)
584 for (i
= 0; i
< DPAA2_SWITCH_RX_NUM_FQS
; i
++)
585 napi_disable(ðsw
->fq
[i
].napi
);
588 static int dpaa2_switch_port_open(struct net_device
*netdev
)
590 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
591 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
594 /* Explicitly set carrier off, otherwise
595 * netif_carrier_ok() will return true and cause 'ip link show'
596 * to report the LOWER_UP flag, even though the link
597 * notification wasn't even received.
599 netif_carrier_off(netdev
);
601 err
= dpsw_if_enable(port_priv
->ethsw_data
->mc_io
, 0,
602 port_priv
->ethsw_data
->dpsw_handle
,
605 netdev_err(netdev
, "dpsw_if_enable err %d\n", err
);
609 /* sync carrier state */
610 err
= dpaa2_switch_port_carrier_state_sync(netdev
);
613 "dpaa2_switch_port_carrier_state_sync err %d\n", err
);
614 goto err_carrier_sync
;
617 dpaa2_switch_enable_ctrl_if_napi(ethsw
);
622 dpsw_if_disable(port_priv
->ethsw_data
->mc_io
, 0,
623 port_priv
->ethsw_data
->dpsw_handle
,
628 static int dpaa2_switch_port_stop(struct net_device
*netdev
)
630 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
631 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
634 err
= dpsw_if_disable(port_priv
->ethsw_data
->mc_io
, 0,
635 port_priv
->ethsw_data
->dpsw_handle
,
638 netdev_err(netdev
, "dpsw_if_disable err %d\n", err
);
642 dpaa2_switch_disable_ctrl_if_napi(ethsw
);
647 static int dpaa2_switch_port_parent_id(struct net_device
*dev
,
648 struct netdev_phys_item_id
*ppid
)
650 struct ethsw_port_priv
*port_priv
= netdev_priv(dev
);
653 ppid
->id
[0] = port_priv
->ethsw_data
->dev_id
;
658 static int dpaa2_switch_port_get_phys_name(struct net_device
*netdev
, char *name
,
661 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
664 err
= snprintf(name
, len
, "p%d", port_priv
->idx
);
/* Context carried through an FDB dump towards netlink. */
struct ethsw_dump_ctx {
	struct net_device *dev;		/* port whose FDB is dumped */
	struct sk_buff *skb;		/* netlink reply under construction */
	struct netlink_callback *cb;	/* dump callback (resume point) */
	int idx;			/* running entry index */
};
678 static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry
*entry
,
679 struct ethsw_dump_ctx
*dump
)
681 int is_dynamic
= entry
->type
& DPSW_FDB_ENTRY_DINAMIC
;
682 u32 portid
= NETLINK_CB(dump
->cb
->skb
).portid
;
683 u32 seq
= dump
->cb
->nlh
->nlmsg_seq
;
684 struct nlmsghdr
*nlh
;
687 if (dump
->idx
< dump
->cb
->args
[2])
690 nlh
= nlmsg_put(dump
->skb
, portid
, seq
, RTM_NEWNEIGH
,
691 sizeof(*ndm
), NLM_F_MULTI
);
695 ndm
= nlmsg_data(nlh
);
696 ndm
->ndm_family
= AF_BRIDGE
;
699 ndm
->ndm_flags
= NTF_SELF
;
701 ndm
->ndm_ifindex
= dump
->dev
->ifindex
;
702 ndm
->ndm_state
= is_dynamic
? NUD_REACHABLE
: NUD_NOARP
;
704 if (nla_put(dump
->skb
, NDA_LLADDR
, ETH_ALEN
, entry
->mac_addr
))
705 goto nla_put_failure
;
707 nlmsg_end(dump
->skb
, nlh
);
714 nlmsg_cancel(dump
->skb
, nlh
);
718 static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry
*entry
,
719 struct ethsw_port_priv
*port_priv
)
721 int idx
= port_priv
->idx
;
724 if (entry
->type
& DPSW_FDB_ENTRY_TYPE_UNICAST
)
725 valid
= entry
->if_info
== port_priv
->idx
;
727 valid
= entry
->if_mask
[idx
/ 8] & BIT(idx
% 8);
732 static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv
*port_priv
,
733 dpaa2_switch_fdb_cb_t cb
, void *data
)
735 struct net_device
*net_dev
= port_priv
->netdev
;
736 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
737 struct device
*dev
= net_dev
->dev
.parent
;
738 struct fdb_dump_entry
*fdb_entries
;
739 struct fdb_dump_entry fdb_entry
;
740 dma_addr_t fdb_dump_iova
;
747 fdb_dump_size
= ethsw
->sw_attr
.max_fdb_entries
* sizeof(fdb_entry
);
748 dma_mem
= kzalloc(fdb_dump_size
, GFP_KERNEL
);
752 fdb_dump_iova
= dma_map_single(dev
, dma_mem
, fdb_dump_size
,
754 if (dma_mapping_error(dev
, fdb_dump_iova
)) {
755 netdev_err(net_dev
, "dma_map_single() failed\n");
760 fdb_id
= dpaa2_switch_port_get_fdb_id(port_priv
);
761 err
= dpsw_fdb_dump(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, fdb_id
,
762 fdb_dump_iova
, fdb_dump_size
, &num_fdb_entries
);
764 netdev_err(net_dev
, "dpsw_fdb_dump() = %d\n", err
);
768 dma_unmap_single(dev
, fdb_dump_iova
, fdb_dump_size
, DMA_FROM_DEVICE
);
770 fdb_entries
= (struct fdb_dump_entry
*)dma_mem
;
771 for (i
= 0; i
< num_fdb_entries
; i
++) {
772 fdb_entry
= fdb_entries
[i
];
774 err
= cb(port_priv
, &fdb_entry
, data
);
785 dma_unmap_single(dev
, fdb_dump_iova
, fdb_dump_size
, DMA_TO_DEVICE
);
/* Per-entry callback for .ndo_fdb_dump: forward entries owned by this port
 * to the netlink serializer.
 */
static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}
801 static int dpaa2_switch_port_fdb_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
,
802 struct net_device
*net_dev
,
803 struct net_device
*filter_dev
, int *idx
)
805 struct ethsw_port_priv
*port_priv
= netdev_priv(net_dev
);
806 struct ethsw_dump_ctx dump
= {
814 err
= dpaa2_switch_fdb_iterate(port_priv
, dpaa2_switch_fdb_entry_dump
, &dump
);
820 static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv
*port_priv
,
821 struct fdb_dump_entry
*fdb_entry
,
822 void *data __always_unused
)
824 if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry
, port_priv
))
827 if (!(fdb_entry
->type
& DPSW_FDB_ENTRY_TYPE_DYNAMIC
))
830 if (fdb_entry
->type
& DPSW_FDB_ENTRY_TYPE_UNICAST
)
831 dpaa2_switch_port_fdb_del_uc(port_priv
, fdb_entry
->mac_addr
);
833 dpaa2_switch_port_fdb_del_mc(port_priv
, fdb_entry
->mac_addr
);
838 static void dpaa2_switch_port_fast_age(struct ethsw_port_priv
*port_priv
)
840 dpaa2_switch_fdb_iterate(port_priv
,
841 dpaa2_switch_fdb_entry_fast_age
, NULL
);
844 static int dpaa2_switch_port_vlan_add(struct net_device
*netdev
, __be16 proto
,
847 struct switchdev_obj_port_vlan vlan
= {
848 .obj
.id
= SWITCHDEV_OBJ_ID_PORT_VLAN
,
850 .obj
.orig_dev
= netdev
,
851 /* This API only allows programming tagged, non-PVID VIDs */
855 return dpaa2_switch_port_vlans_add(netdev
, &vlan
);
858 static int dpaa2_switch_port_vlan_kill(struct net_device
*netdev
, __be16 proto
,
861 struct switchdev_obj_port_vlan vlan
= {
862 .obj
.id
= SWITCHDEV_OBJ_ID_PORT_VLAN
,
864 .obj
.orig_dev
= netdev
,
865 /* This API only allows programming tagged, non-PVID VIDs */
869 return dpaa2_switch_port_vlans_del(netdev
, &vlan
);
872 static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv
*port_priv
)
874 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
875 struct net_device
*net_dev
= port_priv
->netdev
;
876 struct device
*dev
= net_dev
->dev
.parent
;
877 u8 mac_addr
[ETH_ALEN
];
880 if (!(ethsw
->features
& ETHSW_FEATURE_MAC_ADDR
))
883 /* Get firmware address, if any */
884 err
= dpsw_if_get_port_mac_addr(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
885 port_priv
->idx
, mac_addr
);
887 dev_err(dev
, "dpsw_if_get_port_mac_addr() failed\n");
891 /* First check if firmware has any address configured by bootloader */
892 if (!is_zero_ether_addr(mac_addr
)) {
893 memcpy(net_dev
->dev_addr
, mac_addr
, net_dev
->addr_len
);
895 /* No MAC address configured, fill in net_dev->dev_addr
898 eth_hw_addr_random(net_dev
);
899 dev_dbg_once(dev
, "device(s) have all-zero hwaddr, replaced with random\n");
901 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
902 * practical purposes, this will be our "permanent" mac address,
903 * at least until the next reboot. This move will also permit
904 * register_netdevice() to properly fill up net_dev->perm_addr.
906 net_dev
->addr_assign_type
= NET_ADDR_PERM
;
912 static void dpaa2_switch_free_fd(const struct ethsw_core
*ethsw
,
913 const struct dpaa2_fd
*fd
)
915 struct device
*dev
= ethsw
->dev
;
916 unsigned char *buffer_start
;
917 struct sk_buff
**skbh
, *skb
;
920 fd_addr
= dpaa2_fd_get_addr(fd
);
921 skbh
= dpaa2_iova_to_virt(ethsw
->iommu_domain
, fd_addr
);
924 buffer_start
= (unsigned char *)skbh
;
926 dma_unmap_single(dev
, fd_addr
,
927 skb_tail_pointer(skb
) - buffer_start
,
930 /* Move on with skb release */
934 static int dpaa2_switch_build_single_fd(struct ethsw_core
*ethsw
,
938 struct device
*dev
= ethsw
->dev
;
939 struct sk_buff
**skbh
;
944 buff_start
= PTR_ALIGN(skb
->data
- DPAA2_SWITCH_TX_DATA_OFFSET
-
945 DPAA2_SWITCH_TX_BUF_ALIGN
,
946 DPAA2_SWITCH_TX_BUF_ALIGN
);
948 /* Clear FAS to have consistent values for TX confirmation. It is
949 * located in the first 8 bytes of the buffer's hardware annotation
952 hwa
= buff_start
+ DPAA2_SWITCH_SWA_SIZE
;
955 /* Store a backpointer to the skb at the beginning of the buffer
956 * (in the private data area) such that we can release it
959 skbh
= (struct sk_buff
**)buff_start
;
962 addr
= dma_map_single(dev
, buff_start
,
963 skb_tail_pointer(skb
) - buff_start
,
965 if (unlikely(dma_mapping_error(dev
, addr
)))
968 /* Setup the FD fields */
969 memset(fd
, 0, sizeof(*fd
));
971 dpaa2_fd_set_addr(fd
, addr
);
972 dpaa2_fd_set_offset(fd
, (u16
)(skb
->data
- buff_start
));
973 dpaa2_fd_set_len(fd
, skb
->len
);
974 dpaa2_fd_set_format(fd
, dpaa2_fd_single
);
979 static netdev_tx_t
dpaa2_switch_port_tx(struct sk_buff
*skb
,
980 struct net_device
*net_dev
)
982 struct ethsw_port_priv
*port_priv
= netdev_priv(net_dev
);
983 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
984 int retries
= DPAA2_SWITCH_SWP_BUSY_RETRIES
;
988 if (unlikely(skb_headroom(skb
) < DPAA2_SWITCH_NEEDED_HEADROOM
)) {
991 ns
= skb_realloc_headroom(skb
, DPAA2_SWITCH_NEEDED_HEADROOM
);
993 net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev
->name
);
996 dev_consume_skb_any(skb
);
1000 /* We'll be holding a back-reference to the skb until Tx confirmation */
1001 skb
= skb_unshare(skb
, GFP_ATOMIC
);
1002 if (unlikely(!skb
)) {
1003 /* skb_unshare() has already freed the skb */
1004 net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev
->name
);
1008 /* At this stage, we do not support non-linear skbs so just try to
1009 * linearize the skb and if that's not working, just drop the packet.
1011 err
= skb_linearize(skb
);
1013 net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev
->name
, err
);
1017 err
= dpaa2_switch_build_single_fd(ethsw
, skb
, &fd
);
1018 if (unlikely(err
)) {
1019 net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev
->name
, err
);
1024 err
= dpaa2_io_service_enqueue_qd(NULL
,
1028 } while (err
== -EBUSY
&& retries
);
1030 if (unlikely(err
< 0)) {
1031 dpaa2_switch_free_fd(ethsw
, &fd
);
1035 return NETDEV_TX_OK
;
1040 return NETDEV_TX_OK
;
1043 static const struct net_device_ops dpaa2_switch_port_ops
= {
1044 .ndo_open
= dpaa2_switch_port_open
,
1045 .ndo_stop
= dpaa2_switch_port_stop
,
1047 .ndo_set_mac_address
= eth_mac_addr
,
1048 .ndo_get_stats64
= dpaa2_switch_port_get_stats
,
1049 .ndo_change_mtu
= dpaa2_switch_port_change_mtu
,
1050 .ndo_has_offload_stats
= dpaa2_switch_port_has_offload_stats
,
1051 .ndo_get_offload_stats
= dpaa2_switch_port_get_offload_stats
,
1052 .ndo_fdb_dump
= dpaa2_switch_port_fdb_dump
,
1053 .ndo_vlan_rx_add_vid
= dpaa2_switch_port_vlan_add
,
1054 .ndo_vlan_rx_kill_vid
= dpaa2_switch_port_vlan_kill
,
1056 .ndo_start_xmit
= dpaa2_switch_port_tx
,
1057 .ndo_get_port_parent_id
= dpaa2_switch_port_parent_id
,
1058 .ndo_get_phys_port_name
= dpaa2_switch_port_get_phys_name
,
1061 bool dpaa2_switch_port_dev_check(const struct net_device
*netdev
)
1063 return netdev
->netdev_ops
== &dpaa2_switch_port_ops
;
1066 static void dpaa2_switch_links_state_update(struct ethsw_core
*ethsw
)
1070 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++) {
1071 dpaa2_switch_port_carrier_state_sync(ethsw
->ports
[i
]->netdev
);
1072 dpaa2_switch_port_set_mac_addr(ethsw
->ports
[i
]);
1076 static irqreturn_t
dpaa2_switch_irq0_handler_thread(int irq_num
, void *arg
)
1078 struct device
*dev
= (struct device
*)arg
;
1079 struct ethsw_core
*ethsw
= dev_get_drvdata(dev
);
1081 /* Mask the events and the if_id reserved bits to be cleared on read */
1082 u32 status
= DPSW_IRQ_EVENT_LINK_CHANGED
| 0xFFFF0000;
1085 err
= dpsw_get_irq_status(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1086 DPSW_IRQ_INDEX_IF
, &status
);
1088 dev_err(dev
, "Can't get irq status (err %d)\n", err
);
1090 err
= dpsw_clear_irq_status(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1091 DPSW_IRQ_INDEX_IF
, 0xFFFFFFFF);
1093 dev_err(dev
, "Can't clear irq status (err %d)\n", err
);
1097 if (status
& DPSW_IRQ_EVENT_LINK_CHANGED
)
1098 dpaa2_switch_links_state_update(ethsw
);
1104 static int dpaa2_switch_setup_irqs(struct fsl_mc_device
*sw_dev
)
1106 struct device
*dev
= &sw_dev
->dev
;
1107 struct ethsw_core
*ethsw
= dev_get_drvdata(dev
);
1108 u32 mask
= DPSW_IRQ_EVENT_LINK_CHANGED
;
1109 struct fsl_mc_device_irq
*irq
;
1112 err
= fsl_mc_allocate_irqs(sw_dev
);
1114 dev_err(dev
, "MC irqs allocation failed\n");
1118 if (WARN_ON(sw_dev
->obj_desc
.irq_count
!= DPSW_IRQ_NUM
)) {
1123 err
= dpsw_set_irq_enable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1124 DPSW_IRQ_INDEX_IF
, 0);
1126 dev_err(dev
, "dpsw_set_irq_enable err %d\n", err
);
1130 irq
= sw_dev
->irqs
[DPSW_IRQ_INDEX_IF
];
1132 err
= devm_request_threaded_irq(dev
, irq
->msi_desc
->irq
,
1134 dpaa2_switch_irq0_handler_thread
,
1135 IRQF_NO_SUSPEND
| IRQF_ONESHOT
,
1136 dev_name(dev
), dev
);
1138 dev_err(dev
, "devm_request_threaded_irq(): %d\n", err
);
1142 err
= dpsw_set_irq_mask(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1143 DPSW_IRQ_INDEX_IF
, mask
);
1145 dev_err(dev
, "dpsw_set_irq_mask(): %d\n", err
);
1149 err
= dpsw_set_irq_enable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1150 DPSW_IRQ_INDEX_IF
, 1);
1152 dev_err(dev
, "dpsw_set_irq_enable(): %d\n", err
);
1159 devm_free_irq(dev
, irq
->msi_desc
->irq
, dev
);
1161 fsl_mc_free_irqs(sw_dev
);
1165 static void dpaa2_switch_teardown_irqs(struct fsl_mc_device
*sw_dev
)
1167 struct device
*dev
= &sw_dev
->dev
;
1168 struct ethsw_core
*ethsw
= dev_get_drvdata(dev
);
1171 err
= dpsw_set_irq_enable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1172 DPSW_IRQ_INDEX_IF
, 0);
1174 dev_err(dev
, "dpsw_set_irq_enable err %d\n", err
);
1176 fsl_mc_free_irqs(sw_dev
);
1179 static int dpaa2_switch_port_attr_stp_state_set(struct net_device
*netdev
,
1182 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
1184 return dpaa2_switch_port_set_stp_state(port_priv
, state
);
1187 static int dpaa2_switch_port_attr_set(struct net_device
*netdev
,
1188 const struct switchdev_attr
*attr
,
1189 struct netlink_ext_ack
*extack
)
1194 case SWITCHDEV_ATTR_ID_PORT_STP_STATE
:
1195 err
= dpaa2_switch_port_attr_stp_state_set(netdev
,
1198 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING
:
1199 if (!attr
->u
.vlan_filtering
) {
1200 NL_SET_ERR_MSG_MOD(extack
,
1201 "The DPAA2 switch does not support VLAN-unaware operation");
1213 int dpaa2_switch_port_vlans_add(struct net_device
*netdev
,
1214 const struct switchdev_obj_port_vlan
*vlan
)
1216 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
1217 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
1218 struct dpsw_attr
*attr
= ðsw
->sw_attr
;
1221 /* Make sure that the VLAN is not already configured
1222 * on the switch port
1224 if (port_priv
->vlans
[vlan
->vid
] & ETHSW_VLAN_MEMBER
)
1227 /* Check if there is space for a new VLAN */
1228 err
= dpsw_get_attributes(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1231 netdev_err(netdev
, "dpsw_get_attributes err %d\n", err
);
1234 if (attr
->max_vlans
- attr
->num_vlans
< 1)
1237 /* Check if there is space for a new VLAN */
1238 err
= dpsw_get_attributes(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1241 netdev_err(netdev
, "dpsw_get_attributes err %d\n", err
);
1244 if (attr
->max_vlans
- attr
->num_vlans
< 1)
1247 if (!port_priv
->ethsw_data
->vlans
[vlan
->vid
]) {
1248 /* this is a new VLAN */
1249 err
= dpaa2_switch_add_vlan(port_priv
, vlan
->vid
);
1253 port_priv
->ethsw_data
->vlans
[vlan
->vid
] |= ETHSW_VLAN_GLOBAL
;
1256 return dpaa2_switch_port_add_vlan(port_priv
, vlan
->vid
, vlan
->flags
);
1259 static int dpaa2_switch_port_lookup_address(struct net_device
*netdev
, int is_uc
,
1260 const unsigned char *addr
)
1262 struct netdev_hw_addr_list
*list
= (is_uc
) ? &netdev
->uc
: &netdev
->mc
;
1263 struct netdev_hw_addr
*ha
;
1265 netif_addr_lock_bh(netdev
);
1266 list_for_each_entry(ha
, &list
->list
, list
) {
1267 if (ether_addr_equal(ha
->addr
, addr
)) {
1268 netif_addr_unlock_bh(netdev
);
1272 netif_addr_unlock_bh(netdev
);
1276 static int dpaa2_switch_port_mdb_add(struct net_device
*netdev
,
1277 const struct switchdev_obj_port_mdb
*mdb
)
1279 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
1282 /* Check if address is already set on this port */
1283 if (dpaa2_switch_port_lookup_address(netdev
, 0, mdb
->addr
))
1286 err
= dpaa2_switch_port_fdb_add_mc(port_priv
, mdb
->addr
);
1290 err
= dev_mc_add(netdev
, mdb
->addr
);
1292 netdev_err(netdev
, "dev_mc_add err %d\n", err
);
1293 dpaa2_switch_port_fdb_del_mc(port_priv
, mdb
->addr
);
1299 static int dpaa2_switch_port_obj_add(struct net_device
*netdev
,
1300 const struct switchdev_obj
*obj
)
1305 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
1306 err
= dpaa2_switch_port_vlans_add(netdev
,
1307 SWITCHDEV_OBJ_PORT_VLAN(obj
));
1309 case SWITCHDEV_OBJ_ID_PORT_MDB
:
1310 err
= dpaa2_switch_port_mdb_add(netdev
,
1311 SWITCHDEV_OBJ_PORT_MDB(obj
));
1321 static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv
*port_priv
, u16 vid
)
1323 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
1324 struct net_device
*netdev
= port_priv
->netdev
;
1325 struct dpsw_vlan_if_cfg vcfg
;
1328 if (!port_priv
->vlans
[vid
])
1331 if (port_priv
->vlans
[vid
] & ETHSW_VLAN_PVID
) {
1332 /* If we are deleting the PVID of a port, use VLAN 4095 instead
1333 * as we are sure that neither the bridge nor the 8021q module
1336 err
= dpaa2_switch_port_set_pvid(port_priv
, 4095);
1342 vcfg
.if_id
[0] = port_priv
->idx
;
1343 if (port_priv
->vlans
[vid
] & ETHSW_VLAN_UNTAGGED
) {
1344 err
= dpsw_vlan_remove_if_untagged(ethsw
->mc_io
, 0,
1349 "dpsw_vlan_remove_if_untagged err %d\n",
1352 port_priv
->vlans
[vid
] &= ~ETHSW_VLAN_UNTAGGED
;
1355 if (port_priv
->vlans
[vid
] & ETHSW_VLAN_MEMBER
) {
1356 err
= dpsw_vlan_remove_if(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1360 "dpsw_vlan_remove_if err %d\n", err
);
1363 port_priv
->vlans
[vid
] &= ~ETHSW_VLAN_MEMBER
;
1365 /* Delete VLAN from switch if it is no longer configured on
1368 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++)
1369 if (ethsw
->ports
[i
]->vlans
[vid
] & ETHSW_VLAN_MEMBER
)
1370 return 0; /* Found a port member in VID */
1372 ethsw
->vlans
[vid
] &= ~ETHSW_VLAN_GLOBAL
;
1374 err
= dpaa2_switch_dellink(ethsw
, vid
);
1382 int dpaa2_switch_port_vlans_del(struct net_device
*netdev
,
1383 const struct switchdev_obj_port_vlan
*vlan
)
1385 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
1387 if (netif_is_bridge_master(vlan
->obj
.orig_dev
))
1390 return dpaa2_switch_port_del_vlan(port_priv
, vlan
->vid
);
1393 static int dpaa2_switch_port_mdb_del(struct net_device
*netdev
,
1394 const struct switchdev_obj_port_mdb
*mdb
)
1396 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
1399 if (!dpaa2_switch_port_lookup_address(netdev
, 0, mdb
->addr
))
1402 err
= dpaa2_switch_port_fdb_del_mc(port_priv
, mdb
->addr
);
1406 err
= dev_mc_del(netdev
, mdb
->addr
);
1408 netdev_err(netdev
, "dev_mc_del err %d\n", err
);
1415 static int dpaa2_switch_port_obj_del(struct net_device
*netdev
,
1416 const struct switchdev_obj
*obj
)
1421 case SWITCHDEV_OBJ_ID_PORT_VLAN
:
1422 err
= dpaa2_switch_port_vlans_del(netdev
, SWITCHDEV_OBJ_PORT_VLAN(obj
));
1424 case SWITCHDEV_OBJ_ID_PORT_MDB
:
1425 err
= dpaa2_switch_port_mdb_del(netdev
, SWITCHDEV_OBJ_PORT_MDB(obj
));
1434 static int dpaa2_switch_port_attr_set_event(struct net_device
*netdev
,
1435 struct switchdev_notifier_port_attr_info
*ptr
)
1439 err
= switchdev_handle_port_attr_set(netdev
, ptr
,
1440 dpaa2_switch_port_dev_check
,
1441 dpaa2_switch_port_attr_set
);
1442 return notifier_from_errno(err
);
1445 static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core
*ethsw
, u16 fdb_id
)
1447 struct dpsw_egress_flood_cfg flood_cfg
;
1451 /* Add all the DPAA2 switch ports found in the same bridging domain to
1452 * the egress flooding domain
1454 for (j
= 0; j
< ethsw
->sw_attr
.num_ifs
; j
++)
1455 if (ethsw
->ports
[j
] && ethsw
->ports
[j
]->fdb
->fdb_id
== fdb_id
)
1456 flood_cfg
.if_id
[i
++] = ethsw
->ports
[j
]->idx
;
1458 /* Add the CTRL interface to the egress flooding domain */
1459 flood_cfg
.if_id
[i
++] = ethsw
->sw_attr
.num_ifs
;
1461 /* Use the FDB of the first dpaa2 switch port added to the bridge */
1462 flood_cfg
.fdb_id
= fdb_id
;
1464 /* Setup broadcast flooding domain */
1465 flood_cfg
.flood_type
= DPSW_BROADCAST
;
1466 flood_cfg
.num_ifs
= i
;
1467 err
= dpsw_set_egress_flood(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1470 dev_err(ethsw
->dev
, "dpsw_set_egress_flood() = %d\n", err
);
1474 /* Setup unknown flooding domain */
1475 flood_cfg
.flood_type
= DPSW_FLOODING
;
1476 flood_cfg
.num_ifs
= i
;
1477 err
= dpsw_set_egress_flood(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1480 dev_err(ethsw
->dev
, "dpsw_set_egress_flood() = %d\n", err
);
1487 static int dpaa2_switch_port_bridge_join(struct net_device
*netdev
,
1488 struct net_device
*upper_dev
)
1490 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
1491 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
1492 struct ethsw_port_priv
*other_port_priv
;
1493 struct net_device
*other_dev
;
1494 struct list_head
*iter
;
1497 netdev_for_each_lower_dev(upper_dev
, other_dev
, iter
) {
1498 if (!dpaa2_switch_port_dev_check(other_dev
))
1501 other_port_priv
= netdev_priv(other_dev
);
1502 if (other_port_priv
->ethsw_data
!= port_priv
->ethsw_data
) {
1504 "Interface from a different DPSW is in the bridge already!\n");
1509 /* Delete the previously manually installed VLAN 1 */
1510 err
= dpaa2_switch_port_del_vlan(port_priv
, 1);
1514 dpaa2_switch_port_set_fdb(port_priv
, upper_dev
);
1516 /* Setup the egress flood policy (broadcast, unknown unicast) */
1517 err
= dpaa2_switch_fdb_set_egress_flood(ethsw
, port_priv
->fdb
->fdb_id
);
1519 goto err_egress_flood
;
1524 dpaa2_switch_port_set_fdb(port_priv
, NULL
);
1528 static int dpaa2_switch_port_clear_rxvlan(struct net_device
*vdev
, int vid
, void *arg
)
1530 __be16 vlan_proto
= htons(ETH_P_8021Q
);
1533 vlan_proto
= vlan_dev_vlan_proto(vdev
);
1535 return dpaa2_switch_port_vlan_kill(arg
, vlan_proto
, vid
);
1538 static int dpaa2_switch_port_restore_rxvlan(struct net_device
*vdev
, int vid
, void *arg
)
1540 __be16 vlan_proto
= htons(ETH_P_8021Q
);
1543 vlan_proto
= vlan_dev_vlan_proto(vdev
);
1545 return dpaa2_switch_port_vlan_add(arg
, vlan_proto
, vid
);
1548 static int dpaa2_switch_port_bridge_leave(struct net_device
*netdev
)
1550 struct ethsw_port_priv
*port_priv
= netdev_priv(netdev
);
1551 struct dpaa2_switch_fdb
*old_fdb
= port_priv
->fdb
;
1552 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
1555 /* First of all, fast age any learn FDB addresses on this switch port */
1556 dpaa2_switch_port_fast_age(port_priv
);
1558 /* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
1559 * upper devices or otherwise from the FDB table that we are about to
1562 err
= vlan_for_each(netdev
, dpaa2_switch_port_clear_rxvlan
, netdev
);
1564 netdev_err(netdev
, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err
);
1566 dpaa2_switch_port_set_fdb(port_priv
, NULL
);
1568 /* Restore all RX VLANs into the new FDB table that we just joined */
1569 err
= vlan_for_each(netdev
, dpaa2_switch_port_restore_rxvlan
, netdev
);
1571 netdev_err(netdev
, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err
);
1573 /* Setup the egress flood policy (broadcast, unknown unicast).
1574 * When the port is not under a bridge, only the CTRL interface is part
1575 * of the flooding domain besides the actual port
1577 err
= dpaa2_switch_fdb_set_egress_flood(ethsw
, port_priv
->fdb
->fdb_id
);
1581 /* Recreate the egress flood domain of the FDB that we just left */
1582 err
= dpaa2_switch_fdb_set_egress_flood(ethsw
, old_fdb
->fdb_id
);
1586 /* Add the VLAN 1 as PVID when not under a bridge. We need this since
1587 * the dpaa2 switch interfaces are not capable to be VLAN unaware
1589 return dpaa2_switch_port_add_vlan(port_priv
, DEFAULT_VLAN_ID
,
1590 BRIDGE_VLAN_INFO_UNTAGGED
| BRIDGE_VLAN_INFO_PVID
);
1593 static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device
*netdev
)
1595 struct net_device
*upper_dev
;
1596 struct list_head
*iter
;
1598 /* RCU read lock not necessary because we have write-side protection
1599 * (rtnl_mutex), however a non-rcu iterator does not exist.
1601 netdev_for_each_upper_dev_rcu(netdev
, upper_dev
, iter
)
1602 if (is_vlan_dev(upper_dev
))
1608 static int dpaa2_switch_port_netdevice_event(struct notifier_block
*nb
,
1609 unsigned long event
, void *ptr
)
1611 struct net_device
*netdev
= netdev_notifier_info_to_dev(ptr
);
1612 struct netdev_notifier_changeupper_info
*info
= ptr
;
1613 struct netlink_ext_ack
*extack
;
1614 struct net_device
*upper_dev
;
1617 if (!dpaa2_switch_port_dev_check(netdev
))
1620 extack
= netdev_notifier_info_to_extack(&info
->info
);
1623 case NETDEV_PRECHANGEUPPER
:
1624 upper_dev
= info
->upper_dev
;
1625 if (!netif_is_bridge_master(upper_dev
))
1628 if (!br_vlan_enabled(upper_dev
)) {
1629 NL_SET_ERR_MSG_MOD(extack
, "Cannot join a VLAN-unaware bridge");
1634 err
= dpaa2_switch_prevent_bridging_with_8021q_upper(netdev
);
1636 NL_SET_ERR_MSG_MOD(extack
,
1637 "Cannot join a bridge while VLAN uppers are present");
1642 case NETDEV_CHANGEUPPER
:
1643 upper_dev
= info
->upper_dev
;
1644 if (netif_is_bridge_master(upper_dev
)) {
1646 err
= dpaa2_switch_port_bridge_join(netdev
, upper_dev
);
1648 err
= dpaa2_switch_port_bridge_leave(netdev
);
1654 return notifier_from_errno(err
);
1657 struct ethsw_switchdev_event_work
{
1658 struct work_struct work
;
1659 struct switchdev_notifier_fdb_info fdb_info
;
1660 struct net_device
*dev
;
1661 unsigned long event
;
1664 static void dpaa2_switch_event_work(struct work_struct
*work
)
1666 struct ethsw_switchdev_event_work
*switchdev_work
=
1667 container_of(work
, struct ethsw_switchdev_event_work
, work
);
1668 struct net_device
*dev
= switchdev_work
->dev
;
1669 struct switchdev_notifier_fdb_info
*fdb_info
;
1673 fdb_info
= &switchdev_work
->fdb_info
;
1675 switch (switchdev_work
->event
) {
1676 case SWITCHDEV_FDB_ADD_TO_DEVICE
:
1677 if (!fdb_info
->added_by_user
)
1679 if (is_unicast_ether_addr(fdb_info
->addr
))
1680 err
= dpaa2_switch_port_fdb_add_uc(netdev_priv(dev
),
1683 err
= dpaa2_switch_port_fdb_add_mc(netdev_priv(dev
),
1687 fdb_info
->offloaded
= true;
1688 call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED
, dev
,
1689 &fdb_info
->info
, NULL
);
1691 case SWITCHDEV_FDB_DEL_TO_DEVICE
:
1692 if (!fdb_info
->added_by_user
)
1694 if (is_unicast_ether_addr(fdb_info
->addr
))
1695 dpaa2_switch_port_fdb_del_uc(netdev_priv(dev
), fdb_info
->addr
);
1697 dpaa2_switch_port_fdb_del_mc(netdev_priv(dev
), fdb_info
->addr
);
1702 kfree(switchdev_work
->fdb_info
.addr
);
1703 kfree(switchdev_work
);
1707 /* Called under rcu_read_lock() */
1708 static int dpaa2_switch_port_event(struct notifier_block
*nb
,
1709 unsigned long event
, void *ptr
)
1711 struct net_device
*dev
= switchdev_notifier_info_to_dev(ptr
);
1712 struct ethsw_port_priv
*port_priv
= netdev_priv(dev
);
1713 struct ethsw_switchdev_event_work
*switchdev_work
;
1714 struct switchdev_notifier_fdb_info
*fdb_info
= ptr
;
1715 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
1717 if (event
== SWITCHDEV_PORT_ATTR_SET
)
1718 return dpaa2_switch_port_attr_set_event(dev
, ptr
);
1720 if (!dpaa2_switch_port_dev_check(dev
))
1723 switchdev_work
= kzalloc(sizeof(*switchdev_work
), GFP_ATOMIC
);
1724 if (!switchdev_work
)
1727 INIT_WORK(&switchdev_work
->work
, dpaa2_switch_event_work
);
1728 switchdev_work
->dev
= dev
;
1729 switchdev_work
->event
= event
;
1732 case SWITCHDEV_FDB_ADD_TO_DEVICE
:
1733 case SWITCHDEV_FDB_DEL_TO_DEVICE
:
1734 memcpy(&switchdev_work
->fdb_info
, ptr
,
1735 sizeof(switchdev_work
->fdb_info
));
1736 switchdev_work
->fdb_info
.addr
= kzalloc(ETH_ALEN
, GFP_ATOMIC
);
1737 if (!switchdev_work
->fdb_info
.addr
)
1738 goto err_addr_alloc
;
1740 ether_addr_copy((u8
*)switchdev_work
->fdb_info
.addr
,
1743 /* Take a reference on the device to avoid being freed. */
1747 kfree(switchdev_work
);
1751 queue_work(ethsw
->workqueue
, &switchdev_work
->work
);
1756 kfree(switchdev_work
);
1760 static int dpaa2_switch_port_obj_event(unsigned long event
,
1761 struct net_device
*netdev
,
1762 struct switchdev_notifier_port_obj_info
*port_obj_info
)
1764 int err
= -EOPNOTSUPP
;
1766 if (!dpaa2_switch_port_dev_check(netdev
))
1770 case SWITCHDEV_PORT_OBJ_ADD
:
1771 err
= dpaa2_switch_port_obj_add(netdev
, port_obj_info
->obj
);
1773 case SWITCHDEV_PORT_OBJ_DEL
:
1774 err
= dpaa2_switch_port_obj_del(netdev
, port_obj_info
->obj
);
1778 port_obj_info
->handled
= true;
1779 return notifier_from_errno(err
);
1782 static int dpaa2_switch_port_blocking_event(struct notifier_block
*nb
,
1783 unsigned long event
, void *ptr
)
1785 struct net_device
*dev
= switchdev_notifier_info_to_dev(ptr
);
1788 case SWITCHDEV_PORT_OBJ_ADD
:
1789 case SWITCHDEV_PORT_OBJ_DEL
:
1790 return dpaa2_switch_port_obj_event(event
, dev
, ptr
);
1791 case SWITCHDEV_PORT_ATTR_SET
:
1792 return dpaa2_switch_port_attr_set_event(dev
, ptr
);
1798 /* Build a linear skb based on a single-buffer frame descriptor */
1799 static struct sk_buff
*dpaa2_switch_build_linear_skb(struct ethsw_core
*ethsw
,
1800 const struct dpaa2_fd
*fd
)
1802 u16 fd_offset
= dpaa2_fd_get_offset(fd
);
1803 dma_addr_t addr
= dpaa2_fd_get_addr(fd
);
1804 u32 fd_length
= dpaa2_fd_get_len(fd
);
1805 struct device
*dev
= ethsw
->dev
;
1806 struct sk_buff
*skb
= NULL
;
1809 fd_vaddr
= dpaa2_iova_to_virt(ethsw
->iommu_domain
, addr
);
1810 dma_unmap_page(dev
, addr
, DPAA2_SWITCH_RX_BUF_SIZE
,
1813 skb
= build_skb(fd_vaddr
, DPAA2_SWITCH_RX_BUF_SIZE
+
1814 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
)));
1815 if (unlikely(!skb
)) {
1816 dev_err(dev
, "build_skb() failed\n");
1820 skb_reserve(skb
, fd_offset
);
1821 skb_put(skb
, fd_length
);
1828 static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq
*fq
,
1829 const struct dpaa2_fd
*fd
)
1831 dpaa2_switch_free_fd(fq
->ethsw
, fd
);
1834 static void dpaa2_switch_rx(struct dpaa2_switch_fq
*fq
,
1835 const struct dpaa2_fd
*fd
)
1837 struct ethsw_core
*ethsw
= fq
->ethsw
;
1838 struct ethsw_port_priv
*port_priv
;
1839 struct net_device
*netdev
;
1840 struct vlan_ethhdr
*hdr
;
1841 struct sk_buff
*skb
;
1845 /* get switch ingress interface ID */
1846 if_id
= upper_32_bits(dpaa2_fd_get_flc(fd
)) & 0x0000FFFF;
1848 if (if_id
>= ethsw
->sw_attr
.num_ifs
) {
1849 dev_err(ethsw
->dev
, "Frame received from unknown interface!\n");
1852 port_priv
= ethsw
->ports
[if_id
];
1853 netdev
= port_priv
->netdev
;
1855 /* build the SKB based on the FD received */
1856 if (dpaa2_fd_get_format(fd
) != dpaa2_fd_single
) {
1857 if (net_ratelimit()) {
1858 netdev_err(netdev
, "Received invalid frame format\n");
1863 skb
= dpaa2_switch_build_linear_skb(ethsw
, fd
);
1867 skb_reset_mac_header(skb
);
1869 /* Remove the VLAN header if the packet that we just received has a vid
1870 * equal to the port PVIDs. Since the dpaa2-switch can operate only in
1871 * VLAN-aware mode and no alterations are made on the packet when it's
1872 * redirected/mirrored to the control interface, we are sure that there
1873 * will always be a VLAN header present.
1875 hdr
= vlan_eth_hdr(skb
);
1876 vid
= ntohs(hdr
->h_vlan_TCI
) & VLAN_VID_MASK
;
1877 if (vid
== port_priv
->pvid
) {
1878 err
= __skb_vlan_pop(skb
, &vlan_tci
);
1880 dev_info(ethsw
->dev
, "__skb_vlan_pop() returned %d", err
);
1886 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
1888 netif_receive_skb(skb
);
1893 dpaa2_switch_free_fd(ethsw
, fd
);
1896 static void dpaa2_switch_detect_features(struct ethsw_core
*ethsw
)
1898 ethsw
->features
= 0;
1900 if (ethsw
->major
> 8 || (ethsw
->major
== 8 && ethsw
->minor
>= 6))
1901 ethsw
->features
|= ETHSW_FEATURE_MAC_ADDR
;
1904 static int dpaa2_switch_setup_fqs(struct ethsw_core
*ethsw
)
1906 struct dpsw_ctrl_if_attr ctrl_if_attr
;
1907 struct device
*dev
= ethsw
->dev
;
1911 err
= dpsw_ctrl_if_get_attributes(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
1914 dev_err(dev
, "dpsw_ctrl_if_get_attributes() = %d\n", err
);
1918 ethsw
->fq
[i
].fqid
= ctrl_if_attr
.rx_fqid
;
1919 ethsw
->fq
[i
].ethsw
= ethsw
;
1920 ethsw
->fq
[i
++].type
= DPSW_QUEUE_RX
;
1922 ethsw
->fq
[i
].fqid
= ctrl_if_attr
.tx_err_conf_fqid
;
1923 ethsw
->fq
[i
].ethsw
= ethsw
;
1924 ethsw
->fq
[i
++].type
= DPSW_QUEUE_TX_ERR_CONF
;
1929 /* Free buffers acquired from the buffer pool or which were meant to
1930 * be released in the pool
1932 static void dpaa2_switch_free_bufs(struct ethsw_core
*ethsw
, u64
*buf_array
, int count
)
1934 struct device
*dev
= ethsw
->dev
;
1938 for (i
= 0; i
< count
; i
++) {
1939 vaddr
= dpaa2_iova_to_virt(ethsw
->iommu_domain
, buf_array
[i
]);
1940 dma_unmap_page(dev
, buf_array
[i
], DPAA2_SWITCH_RX_BUF_SIZE
,
1942 free_pages((unsigned long)vaddr
, 0);
1946 /* Perform a single release command to add buffers
1947 * to the specified buffer pool
1949 static int dpaa2_switch_add_bufs(struct ethsw_core
*ethsw
, u16 bpid
)
1951 struct device
*dev
= ethsw
->dev
;
1952 u64 buf_array
[BUFS_PER_CMD
];
1959 for (i
= 0; i
< BUFS_PER_CMD
; i
++) {
1960 /* Allocate one page for each Rx buffer. WRIOP sees
1961 * the entire page except for a tailroom reserved for
1964 page
= dev_alloc_pages(0);
1966 dev_err(dev
, "buffer allocation failed\n");
1970 addr
= dma_map_page(dev
, page
, 0, DPAA2_SWITCH_RX_BUF_SIZE
,
1972 if (dma_mapping_error(dev
, addr
)) {
1973 dev_err(dev
, "dma_map_single() failed\n");
1976 buf_array
[i
] = addr
;
1980 /* In case the portal is busy, retry until successful or
1983 while ((err
= dpaa2_io_service_release(NULL
, bpid
,
1984 buf_array
, i
)) == -EBUSY
) {
1985 if (retries
++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES
)
1991 /* If release command failed, clean up and bail out. */
1993 dpaa2_switch_free_bufs(ethsw
, buf_array
, i
);
2000 __free_pages(page
, 0);
2002 /* If we managed to allocate at least some buffers,
2003 * release them to hardware
2011 static int dpaa2_switch_refill_bp(struct ethsw_core
*ethsw
)
2013 int *count
= ðsw
->buf_count
;
2017 if (unlikely(*count
< DPAA2_ETHSW_REFILL_THRESH
)) {
2019 new_count
= dpaa2_switch_add_bufs(ethsw
, ethsw
->bpid
);
2020 if (unlikely(!new_count
)) {
2021 /* Out of memory; abort for now, we'll
2026 *count
+= new_count
;
2027 } while (*count
< DPAA2_ETHSW_NUM_BUFS
);
2029 if (unlikely(*count
< DPAA2_ETHSW_NUM_BUFS
))
2036 static int dpaa2_switch_seed_bp(struct ethsw_core
*ethsw
)
2040 for (i
= 0; i
< DPAA2_ETHSW_NUM_BUFS
; i
+= BUFS_PER_CMD
) {
2041 count
= ðsw
->buf_count
;
2042 *count
+= dpaa2_switch_add_bufs(ethsw
, ethsw
->bpid
);
2044 if (unlikely(*count
< BUFS_PER_CMD
))
2051 static void dpaa2_switch_drain_bp(struct ethsw_core
*ethsw
)
2053 u64 buf_array
[BUFS_PER_CMD
];
2057 ret
= dpaa2_io_service_acquire(NULL
, ethsw
->bpid
,
2058 buf_array
, BUFS_PER_CMD
);
2061 "dpaa2_io_service_acquire() = %d\n", ret
);
2064 dpaa2_switch_free_bufs(ethsw
, buf_array
, ret
);
2069 static int dpaa2_switch_setup_dpbp(struct ethsw_core
*ethsw
)
2071 struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg
= { 0 };
2072 struct device
*dev
= ethsw
->dev
;
2073 struct fsl_mc_device
*dpbp_dev
;
2074 struct dpbp_attr dpbp_attrs
;
2077 err
= fsl_mc_object_allocate(to_fsl_mc_device(dev
), FSL_MC_POOL_DPBP
,
2081 err
= -EPROBE_DEFER
;
2083 dev_err(dev
, "DPBP device allocation failed\n");
2086 ethsw
->dpbp_dev
= dpbp_dev
;
2088 err
= dpbp_open(ethsw
->mc_io
, 0, dpbp_dev
->obj_desc
.id
,
2089 &dpbp_dev
->mc_handle
);
2091 dev_err(dev
, "dpbp_open() failed\n");
2095 err
= dpbp_reset(ethsw
->mc_io
, 0, dpbp_dev
->mc_handle
);
2097 dev_err(dev
, "dpbp_reset() failed\n");
2101 err
= dpbp_enable(ethsw
->mc_io
, 0, dpbp_dev
->mc_handle
);
2103 dev_err(dev
, "dpbp_enable() failed\n");
2107 err
= dpbp_get_attributes(ethsw
->mc_io
, 0, dpbp_dev
->mc_handle
,
2110 dev_err(dev
, "dpbp_get_attributes() failed\n");
2114 dpsw_ctrl_if_pools_cfg
.num_dpbp
= 1;
2115 dpsw_ctrl_if_pools_cfg
.pools
[0].dpbp_id
= dpbp_attrs
.id
;
2116 dpsw_ctrl_if_pools_cfg
.pools
[0].buffer_size
= DPAA2_SWITCH_RX_BUF_SIZE
;
2117 dpsw_ctrl_if_pools_cfg
.pools
[0].backup_pool
= 0;
2119 err
= dpsw_ctrl_if_set_pools(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
2120 &dpsw_ctrl_if_pools_cfg
);
2122 dev_err(dev
, "dpsw_ctrl_if_set_pools() failed\n");
2125 ethsw
->bpid
= dpbp_attrs
.id
;
2130 dpbp_disable(ethsw
->mc_io
, 0, dpbp_dev
->mc_handle
);
2133 dpbp_close(ethsw
->mc_io
, 0, dpbp_dev
->mc_handle
);
2135 fsl_mc_object_free(dpbp_dev
);
2139 static void dpaa2_switch_free_dpbp(struct ethsw_core
*ethsw
)
2141 dpbp_disable(ethsw
->mc_io
, 0, ethsw
->dpbp_dev
->mc_handle
);
2142 dpbp_close(ethsw
->mc_io
, 0, ethsw
->dpbp_dev
->mc_handle
);
2143 fsl_mc_object_free(ethsw
->dpbp_dev
);
2146 static int dpaa2_switch_alloc_rings(struct ethsw_core
*ethsw
)
2150 for (i
= 0; i
< DPAA2_SWITCH_RX_NUM_FQS
; i
++) {
2151 ethsw
->fq
[i
].store
=
2152 dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE
,
2154 if (!ethsw
->fq
[i
].store
) {
2155 dev_err(ethsw
->dev
, "dpaa2_io_store_create failed\n");
2157 dpaa2_io_store_destroy(ethsw
->fq
[i
].store
);
2165 static void dpaa2_switch_destroy_rings(struct ethsw_core
*ethsw
)
2169 for (i
= 0; i
< DPAA2_SWITCH_RX_NUM_FQS
; i
++)
2170 dpaa2_io_store_destroy(ethsw
->fq
[i
].store
);
2173 static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq
*fq
)
2175 int err
, retries
= 0;
2177 /* Try to pull from the FQ while the portal is busy and we didn't hit
2178 * the maximum number fo retries
2181 err
= dpaa2_io_service_pull_fq(NULL
, fq
->fqid
, fq
->store
);
2183 } while (err
== -EBUSY
&& retries
++ < DPAA2_SWITCH_SWP_BUSY_RETRIES
);
2186 dev_err(fq
->ethsw
->dev
, "dpaa2_io_service_pull err %d", err
);
2191 /* Consume all frames pull-dequeued into the store */
2192 static int dpaa2_switch_store_consume(struct dpaa2_switch_fq
*fq
)
2194 struct ethsw_core
*ethsw
= fq
->ethsw
;
2195 int cleaned
= 0, is_last
;
2196 struct dpaa2_dq
*dq
;
2200 /* Get the next available FD from the store */
2201 dq
= dpaa2_io_store_next(fq
->store
, &is_last
);
2202 if (unlikely(!dq
)) {
2203 if (retries
++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES
) {
2204 dev_err_once(ethsw
->dev
,
2205 "No valid dequeue response\n");
2211 if (fq
->type
== DPSW_QUEUE_RX
)
2212 dpaa2_switch_rx(fq
, dpaa2_dq_fd(dq
));
2214 dpaa2_switch_tx_conf(fq
, dpaa2_dq_fd(dq
));
2222 /* NAPI poll routine */
2223 static int dpaa2_switch_poll(struct napi_struct
*napi
, int budget
)
2225 int err
, cleaned
= 0, store_cleaned
, work_done
;
2226 struct dpaa2_switch_fq
*fq
;
2229 fq
= container_of(napi
, struct dpaa2_switch_fq
, napi
);
2232 err
= dpaa2_switch_pull_fq(fq
);
2236 /* Refill pool if appropriate */
2237 dpaa2_switch_refill_bp(fq
->ethsw
);
2239 store_cleaned
= dpaa2_switch_store_consume(fq
);
2240 cleaned
+= store_cleaned
;
2242 if (cleaned
>= budget
) {
2247 } while (store_cleaned
);
2249 /* We didn't consume the entire budget, so finish napi and re-enable
2250 * data availability notifications
2252 napi_complete_done(napi
, cleaned
);
2254 err
= dpaa2_io_service_rearm(NULL
, &fq
->nctx
);
2256 } while (err
== -EBUSY
&& retries
++ < DPAA2_SWITCH_SWP_BUSY_RETRIES
);
2258 work_done
= max(cleaned
, 1);
2264 static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx
*nctx
)
2266 struct dpaa2_switch_fq
*fq
;
2268 fq
= container_of(nctx
, struct dpaa2_switch_fq
, nctx
);
2270 napi_schedule(&fq
->napi
);
2273 static int dpaa2_switch_setup_dpio(struct ethsw_core
*ethsw
)
2275 struct dpsw_ctrl_if_queue_cfg queue_cfg
;
2276 struct dpaa2_io_notification_ctx
*nctx
;
2279 for (i
= 0; i
< DPAA2_SWITCH_RX_NUM_FQS
; i
++) {
2280 nctx
= ðsw
->fq
[i
].nctx
;
2282 /* Register a new software context for the FQID.
2283 * By using NULL as the first parameter, we specify that we do
2284 * not care on which cpu are interrupts received for this queue
2287 nctx
->id
= ethsw
->fq
[i
].fqid
;
2288 nctx
->desired_cpu
= DPAA2_IO_ANY_CPU
;
2289 nctx
->cb
= dpaa2_switch_fqdan_cb
;
2290 err
= dpaa2_io_service_register(NULL
, nctx
, ethsw
->dev
);
2292 err
= -EPROBE_DEFER
;
2296 queue_cfg
.options
= DPSW_CTRL_IF_QUEUE_OPT_DEST
|
2297 DPSW_CTRL_IF_QUEUE_OPT_USER_CTX
;
2298 queue_cfg
.dest_cfg
.dest_type
= DPSW_CTRL_IF_DEST_DPIO
;
2299 queue_cfg
.dest_cfg
.dest_id
= nctx
->dpio_id
;
2300 queue_cfg
.dest_cfg
.priority
= 0;
2301 queue_cfg
.user_ctx
= nctx
->qman64
;
2303 err
= dpsw_ctrl_if_set_queue(ethsw
->mc_io
, 0,
2314 dpaa2_io_service_deregister(NULL
, nctx
, ethsw
->dev
);
2316 for (j
= 0; j
< i
; j
++)
2317 dpaa2_io_service_deregister(NULL
, ðsw
->fq
[j
].nctx
,
2323 static void dpaa2_switch_free_dpio(struct ethsw_core
*ethsw
)
2327 for (i
= 0; i
< DPAA2_SWITCH_RX_NUM_FQS
; i
++)
2328 dpaa2_io_service_deregister(NULL
, ðsw
->fq
[i
].nctx
,
2332 static int dpaa2_switch_ctrl_if_setup(struct ethsw_core
*ethsw
)
2336 /* setup FQs for Rx and Tx Conf */
2337 err
= dpaa2_switch_setup_fqs(ethsw
);
2341 /* setup the buffer pool needed on the Rx path */
2342 err
= dpaa2_switch_setup_dpbp(ethsw
);
2346 err
= dpaa2_switch_seed_bp(ethsw
);
2350 err
= dpaa2_switch_alloc_rings(ethsw
);
2352 goto err_drain_dpbp
;
2354 err
= dpaa2_switch_setup_dpio(ethsw
);
2356 goto err_destroy_rings
;
2358 err
= dpsw_ctrl_if_enable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2360 dev_err(ethsw
->dev
, "dpsw_ctrl_if_enable err %d\n", err
);
2361 goto err_deregister_dpio
;
2366 err_deregister_dpio
:
2367 dpaa2_switch_free_dpio(ethsw
);
2369 dpaa2_switch_destroy_rings(ethsw
);
2371 dpaa2_switch_drain_bp(ethsw
);
2373 dpaa2_switch_free_dpbp(ethsw
);
2378 static int dpaa2_switch_init(struct fsl_mc_device
*sw_dev
)
2380 struct device
*dev
= &sw_dev
->dev
;
2381 struct ethsw_core
*ethsw
= dev_get_drvdata(dev
);
2382 struct dpsw_vlan_if_cfg vcfg
= {0};
2383 struct dpsw_tci_cfg tci_cfg
= {0};
2384 struct dpsw_stp_cfg stp_cfg
;
2388 ethsw
->dev_id
= sw_dev
->obj_desc
.id
;
2390 err
= dpsw_open(ethsw
->mc_io
, 0, ethsw
->dev_id
, ðsw
->dpsw_handle
);
2392 dev_err(dev
, "dpsw_open err %d\n", err
);
2396 err
= dpsw_get_attributes(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
2399 dev_err(dev
, "dpsw_get_attributes err %d\n", err
);
2403 err
= dpsw_get_api_version(ethsw
->mc_io
, 0,
2407 dev_err(dev
, "dpsw_get_api_version err %d\n", err
);
2411 /* Minimum supported DPSW version check */
2412 if (ethsw
->major
< DPSW_MIN_VER_MAJOR
||
2413 (ethsw
->major
== DPSW_MIN_VER_MAJOR
&&
2414 ethsw
->minor
< DPSW_MIN_VER_MINOR
)) {
2415 dev_err(dev
, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
2416 ethsw
->major
, ethsw
->minor
);
2421 if (!dpaa2_switch_supports_cpu_traffic(ethsw
)) {
2426 dpaa2_switch_detect_features(ethsw
);
2428 err
= dpsw_reset(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2430 dev_err(dev
, "dpsw_reset err %d\n", err
);
2434 stp_cfg
.vlan_id
= DEFAULT_VLAN_ID
;
2435 stp_cfg
.state
= DPSW_STP_STATE_FORWARDING
;
2437 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++) {
2438 err
= dpsw_if_disable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, i
);
2440 dev_err(dev
, "dpsw_if_disable err %d\n", err
);
2444 err
= dpsw_if_set_stp(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, i
,
2447 dev_err(dev
, "dpsw_if_set_stp err %d for port %d\n",
2452 /* Switch starts with all ports configured to VLAN 1. Need to
2453 * remove this setting to allow configuration at bridge join
2457 err
= dpsw_vlan_remove_if_untagged(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
2458 DEFAULT_VLAN_ID
, &vcfg
);
2460 dev_err(dev
, "dpsw_vlan_remove_if_untagged err %d\n",
2465 tci_cfg
.vlan_id
= 4095;
2466 err
= dpsw_if_set_tci(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, i
, &tci_cfg
);
2468 dev_err(dev
, "dpsw_if_set_tci err %d\n", err
);
2472 err
= dpsw_vlan_remove_if(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
2473 DEFAULT_VLAN_ID
, &vcfg
);
2475 dev_err(dev
, "dpsw_vlan_remove_if err %d\n", err
);
2480 err
= dpsw_vlan_remove(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, DEFAULT_VLAN_ID
);
2482 dev_err(dev
, "dpsw_vlan_remove err %d\n", err
);
2486 ethsw
->workqueue
= alloc_ordered_workqueue("%s_%d_ordered",
2487 WQ_MEM_RECLAIM
, "ethsw",
2489 if (!ethsw
->workqueue
) {
2494 err
= dpsw_fdb_remove(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
, 0);
2496 goto err_destroy_ordered_workqueue
;
2498 err
= dpaa2_switch_ctrl_if_setup(ethsw
);
2500 goto err_destroy_ordered_workqueue
;
2504 err_destroy_ordered_workqueue
:
2505 destroy_workqueue(ethsw
->workqueue
);
2508 dpsw_close(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2512 static int dpaa2_switch_port_init(struct ethsw_port_priv
*port_priv
, u16 port
)
2514 struct switchdev_obj_port_vlan vlan
= {
2515 .obj
.id
= SWITCHDEV_OBJ_ID_PORT_VLAN
,
2516 .vid
= DEFAULT_VLAN_ID
,
2517 .flags
= BRIDGE_VLAN_INFO_UNTAGGED
| BRIDGE_VLAN_INFO_PVID
,
2519 struct net_device
*netdev
= port_priv
->netdev
;
2520 struct ethsw_core
*ethsw
= port_priv
->ethsw_data
;
2521 struct dpsw_fdb_cfg fdb_cfg
= {0};
2522 struct dpaa2_switch_fdb
*fdb
;
2523 struct dpsw_if_attr dpsw_if_attr
;
2527 /* Get the Tx queue for this specific port */
2528 err
= dpsw_if_get_attributes(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
2529 port_priv
->idx
, &dpsw_if_attr
);
2531 netdev_err(netdev
, "dpsw_if_get_attributes err %d\n", err
);
2534 port_priv
->tx_qdid
= dpsw_if_attr
.qdid
;
2536 /* Create a FDB table for this particular switch port */
2537 fdb_cfg
.num_fdb_entries
= ethsw
->sw_attr
.max_fdb_entries
/ ethsw
->sw_attr
.num_ifs
;
2538 err
= dpsw_fdb_add(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
,
2541 netdev_err(netdev
, "dpsw_fdb_add err %d\n", err
);
2545 /* Find an unused dpaa2_switch_fdb structure and use it */
2546 fdb
= dpaa2_switch_fdb_get_unused(ethsw
);
2547 fdb
->fdb_id
= fdb_id
;
2549 fdb
->bridge_dev
= NULL
;
2550 port_priv
->fdb
= fdb
;
2552 /* We need to add VLAN 1 as the PVID on this port until it is under a
2553 * bridge since the DPAA2 switch is not able to handle the traffic in a
2554 * VLAN unaware fashion
2556 err
= dpaa2_switch_port_vlans_add(netdev
, &vlan
);
2560 /* Setup the egress flooding domains (broadcast, unknown unicast */
2561 err
= dpaa2_switch_fdb_set_egress_flood(ethsw
, port_priv
->fdb
->fdb_id
);
2568 static void dpaa2_switch_takedown(struct fsl_mc_device
*sw_dev
)
2570 struct device
*dev
= &sw_dev
->dev
;
2571 struct ethsw_core
*ethsw
= dev_get_drvdata(dev
);
2574 err
= dpsw_close(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2576 dev_warn(dev
, "dpsw_close err %d\n", err
);
2579 static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core
*ethsw
)
2581 dpsw_ctrl_if_disable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2582 dpaa2_switch_free_dpio(ethsw
);
2583 dpaa2_switch_destroy_rings(ethsw
);
2584 dpaa2_switch_drain_bp(ethsw
);
2585 dpaa2_switch_free_dpbp(ethsw
);
2588 static int dpaa2_switch_remove(struct fsl_mc_device
*sw_dev
)
2590 struct ethsw_port_priv
*port_priv
;
2591 struct ethsw_core
*ethsw
;
2596 ethsw
= dev_get_drvdata(dev
);
2598 dpaa2_switch_ctrl_if_teardown(ethsw
);
2600 dpaa2_switch_teardown_irqs(sw_dev
);
2602 dpsw_disable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2604 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++) {
2605 port_priv
= ethsw
->ports
[i
];
2606 unregister_netdev(port_priv
->netdev
);
2607 free_netdev(port_priv
->netdev
);
2611 kfree(ethsw
->ports
);
2613 dpaa2_switch_takedown(sw_dev
);
2615 destroy_workqueue(ethsw
->workqueue
);
2617 fsl_mc_portal_free(ethsw
->mc_io
);
2621 dev_set_drvdata(dev
, NULL
);
2626 static int dpaa2_switch_probe_port(struct ethsw_core
*ethsw
,
2629 struct ethsw_port_priv
*port_priv
;
2630 struct device
*dev
= ethsw
->dev
;
2631 struct net_device
*port_netdev
;
2634 port_netdev
= alloc_etherdev(sizeof(struct ethsw_port_priv
));
2636 dev_err(dev
, "alloc_etherdev error\n");
2640 port_priv
= netdev_priv(port_netdev
);
2641 port_priv
->netdev
= port_netdev
;
2642 port_priv
->ethsw_data
= ethsw
;
2644 port_priv
->idx
= port_idx
;
2645 port_priv
->stp_state
= BR_STATE_FORWARDING
;
2647 SET_NETDEV_DEV(port_netdev
, dev
);
2648 port_netdev
->netdev_ops
= &dpaa2_switch_port_ops
;
2649 port_netdev
->ethtool_ops
= &dpaa2_switch_port_ethtool_ops
;
2651 port_netdev
->needed_headroom
= DPAA2_SWITCH_NEEDED_HEADROOM
;
2653 /* Set MTU limits */
2654 port_netdev
->min_mtu
= ETH_MIN_MTU
;
2655 port_netdev
->max_mtu
= ETHSW_MAX_FRAME_LENGTH
;
2657 /* Populate the private port structure so that later calls to
2658 * dpaa2_switch_port_init() can use it.
2660 ethsw
->ports
[port_idx
] = port_priv
;
2662 /* The DPAA2 switch's ingress path depends on the VLAN table,
2663 * thus we are not able to disable VLAN filtering.
2665 port_netdev
->features
= NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_VLAN_STAG_FILTER
;
2667 err
= dpaa2_switch_port_init(port_priv
, port_idx
);
2669 goto err_port_probe
;
2671 err
= dpaa2_switch_port_set_mac_addr(port_priv
);
2673 goto err_port_probe
;
2678 free_netdev(port_netdev
);
2679 ethsw
->ports
[port_idx
] = NULL
;
2684 static int dpaa2_switch_probe(struct fsl_mc_device
*sw_dev
)
2686 struct device
*dev
= &sw_dev
->dev
;
2687 struct ethsw_core
*ethsw
;
2690 /* Allocate switch core*/
2691 ethsw
= kzalloc(sizeof(*ethsw
), GFP_KERNEL
);
2697 ethsw
->iommu_domain
= iommu_get_domain_for_dev(dev
);
2698 dev_set_drvdata(dev
, ethsw
);
2700 err
= fsl_mc_portal_allocate(sw_dev
, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL
,
2704 err
= -EPROBE_DEFER
;
2706 dev_err(dev
, "fsl_mc_portal_allocate err %d\n", err
);
2707 goto err_free_drvdata
;
2710 err
= dpaa2_switch_init(sw_dev
);
2712 goto err_free_cmdport
;
2714 ethsw
->ports
= kcalloc(ethsw
->sw_attr
.num_ifs
, sizeof(*ethsw
->ports
),
2716 if (!(ethsw
->ports
)) {
2721 ethsw
->fdbs
= kcalloc(ethsw
->sw_attr
.num_ifs
, sizeof(*ethsw
->fdbs
),
2725 goto err_free_ports
;
2728 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++) {
2729 err
= dpaa2_switch_probe_port(ethsw
, i
);
2731 goto err_free_netdev
;
2734 /* Add a NAPI instance for each of the Rx queues. The first port's
2735 * net_device will be associated with the instances since we do not have
2736 * different queues for each switch ports.
2738 for (i
= 0; i
< DPAA2_SWITCH_RX_NUM_FQS
; i
++)
2739 netif_napi_add(ethsw
->ports
[0]->netdev
,
2740 ðsw
->fq
[i
].napi
, dpaa2_switch_poll
,
2743 err
= dpsw_enable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2745 dev_err(ethsw
->dev
, "dpsw_enable err %d\n", err
);
2746 goto err_free_netdev
;
2750 err
= dpaa2_switch_setup_irqs(sw_dev
);
2754 /* Register the netdev only when the entire setup is done and the
2755 * switch port interfaces are ready to receive traffic
2757 for (i
= 0; i
< ethsw
->sw_attr
.num_ifs
; i
++) {
2758 err
= register_netdev(ethsw
->ports
[i
]->netdev
);
2760 dev_err(dev
, "register_netdev error %d\n", err
);
2761 goto err_unregister_ports
;
2767 err_unregister_ports
:
2768 for (i
--; i
>= 0; i
--)
2769 unregister_netdev(ethsw
->ports
[i
]->netdev
);
2770 dpaa2_switch_teardown_irqs(sw_dev
);
2772 dpsw_disable(ethsw
->mc_io
, 0, ethsw
->dpsw_handle
);
2774 for (i
--; i
>= 0; i
--)
2775 free_netdev(ethsw
->ports
[i
]->netdev
);
2778 kfree(ethsw
->ports
);
2781 dpaa2_switch_takedown(sw_dev
);
2784 fsl_mc_portal_free(ethsw
->mc_io
);
2788 dev_set_drvdata(dev
, NULL
);
2793 static const struct fsl_mc_device_id dpaa2_switch_match_id_table
[] = {
2795 .vendor
= FSL_MC_VENDOR_FREESCALE
,
2800 MODULE_DEVICE_TABLE(fslmc
, dpaa2_switch_match_id_table
);
2802 static struct fsl_mc_driver dpaa2_switch_drv
= {
2804 .name
= KBUILD_MODNAME
,
2805 .owner
= THIS_MODULE
,
2807 .probe
= dpaa2_switch_probe
,
2808 .remove
= dpaa2_switch_remove
,
2809 .match_id_table
= dpaa2_switch_match_id_table
2812 static struct notifier_block dpaa2_switch_port_nb __read_mostly
= {
2813 .notifier_call
= dpaa2_switch_port_netdevice_event
,
2816 static struct notifier_block dpaa2_switch_port_switchdev_nb
= {
2817 .notifier_call
= dpaa2_switch_port_event
,
2820 static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb
= {
2821 .notifier_call
= dpaa2_switch_port_blocking_event
,
2824 static int dpaa2_switch_register_notifiers(void)
2828 err
= register_netdevice_notifier(&dpaa2_switch_port_nb
);
2830 pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err
);
2834 err
= register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb
);
2836 pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err
);
2837 goto err_switchdev_nb
;
2840 err
= register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb
);
2842 pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err
);
2843 goto err_switchdev_blocking_nb
;
2848 err_switchdev_blocking_nb
:
2849 unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb
);
2851 unregister_netdevice_notifier(&dpaa2_switch_port_nb
);
2856 static void dpaa2_switch_unregister_notifiers(void)
2860 err
= unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb
);
2862 pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
2865 err
= unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb
);
2867 pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err
);
2869 err
= unregister_netdevice_notifier(&dpaa2_switch_port_nb
);
2871 pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err
);
2874 static int __init
dpaa2_switch_driver_init(void)
2878 err
= fsl_mc_driver_register(&dpaa2_switch_drv
);
2882 err
= dpaa2_switch_register_notifiers();
2884 fsl_mc_driver_unregister(&dpaa2_switch_drv
);
2891 static void __exit
dpaa2_switch_driver_exit(void)
2893 dpaa2_switch_unregister_notifiers();
2894 fsl_mc_driver_unregister(&dpaa2_switch_drv
);
2897 module_init(dpaa2_switch_driver_init
);
2898 module_exit(dpaa2_switch_driver_exit
);
2900 MODULE_LICENSE("GPL v2");
2901 MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");