// SPDX-License-Identifier: GPL-2.0
/*
 * DPAA2 Ethernet Switch driver
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#include <linux/module.h>

#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
#include <net/pkt_cls.h>

#include <linux/fsl/mc.h>

#include "dpaa2-switch.h"

/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR	8
#define DPSW_MIN_VER_MINOR	9

#define DEFAULT_VLAN_ID		1
static u16 dpaa2_switch_port_get_fdb_id(struct ethsw_port_priv *port_priv)
{
	return port_priv->fdb->fdb_id;
}
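
/* Each FDB instance and each filter block can be used by at most one
 * bridging domain at a time. The two helpers below do a linear scan over
 * the per-switch arrays (one entry per interface) and return the first
 * entry not currently in use, or NULL when everything is taken.
 */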
static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->fdbs[i].in_use)
			return &ethsw->fdbs[i];
	return NULL;
}
static struct dpaa2_switch_filter_block *
dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (!ethsw->filter_blocks[i].in_use)
			return &ethsw->filter_blocks[i];
	return NULL;
}
static u16 dpaa2_switch_port_set_fdb(struct ethsw_port_priv *port_priv,
				     struct net_device *bridge_dev)
{
	struct ethsw_port_priv *other_port_priv = NULL;
	struct dpaa2_switch_fdb *fdb;
	struct net_device *other_dev;
	struct list_head *iter;

	/* If we leave a bridge (bridge_dev is NULL), find an unused
	 * FDB and use this one
	 */
	if (!bridge_dev) {
		fdb = dpaa2_switch_fdb_get_unused(port_priv->ethsw_data);

		/* If there is no unused FDB, we must be the last port that
		 * leaves the last bridge, all the others are standalone. We
		 * can just keep the FDB that we already have.
		 */
		if (!fdb) {
			port_priv->fdb->bridge_dev = NULL;
			return 0;
		}

		port_priv->fdb = fdb;
		port_priv->fdb->in_use = true;
		port_priv->fdb->bridge_dev = NULL;
		return 0;
	}

	/* The below call to netdev_for_each_lower_dev() demands the RTNL lock
	 * being held. Assert on it so that it's easier to catch new code
	 * paths that reach this point without the RTNL lock.
	 */
	ASSERT_RTNL();

	/* If part of a bridge, use the FDB of the first dpaa2 switch interface
	 * to be present in that bridge
	 */
	netdev_for_each_lower_dev(bridge_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		if (other_dev == port_priv->netdev)
			continue;

		other_port_priv = netdev_priv(other_dev);
		break;
	}

	/* The current port is about to change its FDB to the one used by the
	 * first port that joined the bridge.
	 */
	if (other_port_priv) {
		/* The previous FDB is about to become unused, since the
		 * interface is no longer standalone.
		 */
		port_priv->fdb->in_use = false;
		port_priv->fdb->bridge_dev = NULL;

		/* Get a reference to the new FDB */
		port_priv->fdb = other_port_priv->fdb;
	}

	/* Keep track of the new upper bridge device */
	port_priv->fdb->bridge_dev = bridge_dev;

	return 0;
}
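
/* Example (illustrative only): with swp0 standalone and swp1/swp2 under
 * br0, swp0 keeps a private FDB while swp1 and swp2 share the FDB of the
 * first interface that joined br0. When one of them later leaves the
 * bridge, it reclaims an unused FDB via dpaa2_switch_fdb_get_unused().
 */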
static void dpaa2_switch_fdb_get_flood_cfg(struct ethsw_core *ethsw, u16 fdb_id,
					   enum dpsw_flood_type type,
					   struct dpsw_egress_flood_cfg *cfg)
{
	int i = 0, j;

	memset(cfg, 0, sizeof(*cfg));

	/* Add all the DPAA2 switch ports found in the same bridging domain to
	 * the egress flooding domain
	 */
	for (j = 0; j < ethsw->sw_attr.num_ifs; j++) {
		if (!ethsw->ports[j])
			continue;
		if (ethsw->ports[j]->fdb->fdb_id != fdb_id)
			continue;

		if (type == DPSW_BROADCAST && ethsw->ports[j]->bcast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
		else if (type == DPSW_FLOODING && ethsw->ports[j]->ucast_flood)
			cfg->if_id[i++] = ethsw->ports[j]->idx;
	}

	/* Add the CTRL interface to the egress flooding domain */
	cfg->if_id[i++] = ethsw->sw_attr.num_ifs;

	cfg->fdb_id = fdb_id;
	cfg->flood_type = type;
	cfg->num_ifs = i;
}
static int dpaa2_switch_fdb_set_egress_flood(struct ethsw_core *ethsw, u16 fdb_id)
{
	struct dpsw_egress_flood_cfg flood_cfg;
	int err;

	/* Setup broadcast flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_BROADCAST, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	/* Setup unknown flooding domain */
	dpaa2_switch_fdb_get_flood_cfg(ethsw, fdb_id, DPSW_FLOODING, &flood_cfg);
	err = dpsw_set_egress_flood(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    &flood_cfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_set_egress_flood() = %d\n", err);
		return err;
	}

	return 0;
}
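
/* The broadcast and unknown-unicast flooding domains are programmed through
 * two separate dpsw_set_egress_flood() commands, both keyed on the same
 * fdb_id, so that BR_BCAST_FLOOD and BR_FLOOD can be toggled independently
 * per port.
 */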
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}
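
/* When the DPSW object sits behind an IOMMU, frame addresses received from
 * hardware are IOVAs and must be translated through the IOMMU domain before
 * phys_to_virt(); without an IOMMU domain the address is already physical.
 */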
static int dpaa2_switch_add_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_vlan_cfg vcfg = {0};
	int err;

	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_vlan_add(ethsw->mc_io, 0,
			    ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_add err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = ETHSW_VLAN_MEMBER;

	return 0;
}
static bool dpaa2_switch_port_is_up(struct ethsw_port_priv *port_priv)
{
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_link_state state;
	int err;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return true;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	return state.up ? true : false;
}
static int dpaa2_switch_port_set_pvid(struct ethsw_port_priv *port_priv, u16 pvid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_tci_cfg tci_cfg = { 0 };
	bool up;
	int err, ret;

	err = dpsw_if_get_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_tci err %d\n", err);
		return err;
	}

	tci_cfg.vlan_id = pvid;

	/* Interface needs to be down to change PVID */
	up = dpaa2_switch_port_is_up(port_priv);
	if (up) {
		err = dpsw_if_disable(ethsw->mc_io, 0,
				      ethsw->dpsw_handle,
				      port_priv->idx);
		if (err) {
			netdev_err(netdev, "dpsw_if_disable err %d\n", err);
			return err;
		}
	}

	err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      port_priv->idx, &tci_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_if_set_tci err %d\n", err);
		goto set_tci_error;
	}

	/* Delete previous PVID info and mark the new one */
	port_priv->vlans[port_priv->pvid] &= ~ETHSW_VLAN_PVID;
	port_priv->vlans[pvid] |= ETHSW_VLAN_PVID;
	port_priv->pvid = pvid;

set_tci_error:
	if (up) {
		ret = dpsw_if_enable(ethsw->mc_io, 0,
				     ethsw->dpsw_handle,
				     port_priv->idx);
		if (ret) {
			netdev_err(netdev, "dpsw_if_enable err %d\n", ret);
			return ret;
		}
	}

	return err;
}
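
/* Note that the MC firmware only accepts a TCI update on a disabled
 * interface, which is why the helper above wraps dpsw_if_set_tci() in a
 * dpsw_if_disable()/dpsw_if_enable() pair whenever the link was up.
 */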
static int dpaa2_switch_port_add_vlan(struct ethsw_port_priv *port_priv,
				      u16 vid, u16 flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg = {0};
	int err;

	if (port_priv->vlans[vid]) {
		netdev_warn(netdev, "VLAN %d already configured\n", vid);
		return -EEXIST;
	}

	/* If hit, this VLAN rule will lead the packet into the FDB table
	 * specified in the vlan configuration below
	 */
	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	vcfg.fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	vcfg.options |= DPSW_VLAN_ADD_IF_OPT_FDB_ID;
	err = dpsw_vlan_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, vid, &vcfg);
	if (err) {
		netdev_err(netdev, "dpsw_vlan_add_if err %d\n", err);
		return err;
	}

	port_priv->vlans[vid] = ETHSW_VLAN_MEMBER;

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
		err = dpsw_vlan_add_if_untagged(ethsw->mc_io, 0,
						ethsw->dpsw_handle,
						vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_add_if_untagged err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] |= ETHSW_VLAN_UNTAGGED;
	}

	if (flags & BRIDGE_VLAN_INFO_PVID) {
		err = dpaa2_switch_port_set_pvid(port_priv, vid);
		if (err)
			return err;
	}

	return 0;
}
static enum dpsw_stp_state br_stp_state_to_dpsw(u8 state)
{
	switch (state) {
	case BR_STATE_DISABLED:
		return DPSW_STP_STATE_DISABLED;
	case BR_STATE_LISTENING:
		return DPSW_STP_STATE_LISTENING;
	case BR_STATE_LEARNING:
		return DPSW_STP_STATE_LEARNING;
	case BR_STATE_FORWARDING:
		return DPSW_STP_STATE_FORWARDING;
	case BR_STATE_BLOCKING:
		return DPSW_STP_STATE_BLOCKING;
	default:
		return DPSW_STP_STATE_DISABLED;
	}
}
static int dpaa2_switch_port_set_stp_state(struct ethsw_port_priv *port_priv, u8 state)
{
	struct dpsw_stp_cfg stp_cfg = {0};
	int err;
	u16 vid;

	if (!netif_running(port_priv->netdev) || state == port_priv->stp_state)
		return 0;	/* Nothing to do */

	stp_cfg.state = br_stp_state_to_dpsw(state);
	for (vid = 0; vid <= VLAN_VID_MASK; vid++) {
		if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
			stp_cfg.vlan_id = vid;
			err = dpsw_if_set_stp(port_priv->ethsw_data->mc_io, 0,
					      port_priv->ethsw_data->dpsw_handle,
					      port_priv->idx, &stp_cfg);
			if (err) {
				netdev_err(port_priv->netdev,
					   "dpsw_if_set_stp err %d\n", err);
				return err;
			}
		}
	}

	port_priv->stp_state = state;

	return 0;
}
static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
{
	struct ethsw_port_priv *ppriv_local = NULL;
	int i, err;

	if (!ethsw->vlans[vid])
		return -ENOENT;

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, vid);
	if (err) {
		dev_err(ethsw->dev, "dpsw_vlan_remove err %d\n", err);
		return err;
	}
	ethsw->vlans[vid] = 0;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		ppriv_local = ethsw->ports[i];
		ppriv_local->vlans[vid] = 0;
	}

	return 0;
}
static int dpaa2_switch_port_fdb_add_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_unicast(port_priv->ethsw_data->mc_io, 0,
				   port_priv->ethsw_data->dpsw_handle,
				   fdb_id, &entry);
	if (err)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_add_unicast err %d\n", err);
	return err;
}
static int dpaa2_switch_port_fdb_del_uc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_unicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	entry.if_egress = port_priv->idx;
	entry.type = DPSW_FDB_ENTRY_STATIC;
	ether_addr_copy(entry.mac_addr, addr);

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_unicast(port_priv->ethsw_data->mc_io, 0,
				      port_priv->ethsw_data->dpsw_handle,
				      fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_unicast err %d\n", err);
	return err;
}
static int dpaa2_switch_port_fdb_add_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_add_multicast(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     fdb_id, &entry);
	/* Silently discard error for calling multiple times the add command */
	if (err && err != -ENXIO)
		netdev_err(port_priv->netdev, "dpsw_fdb_add_multicast err %d\n",
			   err);
	return err;
}
static int dpaa2_switch_port_fdb_del_mc(struct ethsw_port_priv *port_priv,
					const unsigned char *addr)
{
	struct dpsw_fdb_multicast_cfg entry = {0};
	u16 fdb_id;
	int err;

	ether_addr_copy(entry.mac_addr, addr);
	entry.type = DPSW_FDB_ENTRY_STATIC;
	entry.num_ifs = 1;
	entry.if_id[0] = port_priv->idx;

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_remove_multicast(port_priv->ethsw_data->mc_io, 0,
					port_priv->ethsw_data->dpsw_handle,
					fdb_id, &entry);
	/* Silently discard error for calling multiple times the del command */
	if (err && err != -ENAVAIL)
		netdev_err(port_priv->netdev,
			   "dpsw_fdb_remove_multicast err %d\n", err);
	return err;
}
static void dpaa2_switch_port_get_stats(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	u64 tmp;
	int err;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME, &stats->rx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME, &stats->tx_packets);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_BYTE, &stats->rx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_BYTE, &stats->tx_bytes);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FRAME_DISCARD,
				  &stats->rx_dropped);
	if (err)
		goto error;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_ING_FLTR_FRAME,
				  &tmp);
	if (err)
		goto error;
	stats->rx_dropped += tmp;

	err = dpsw_if_get_counter(port_priv->ethsw_data->mc_io, 0,
				  port_priv->ethsw_data->dpsw_handle,
				  port_priv->idx,
				  DPSW_CNT_EGR_FRAME_DISCARD,
				  &stats->tx_dropped);
	if (err)
		goto error;

	return;

error:
	netdev_err(netdev, "dpsw_if_get_counter err %d\n", err);
}
static bool dpaa2_switch_port_has_offload_stats(const struct net_device *netdev,
						int attr_id)
{
	return (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT);
}

static int dpaa2_switch_port_get_offload_stats(int attr_id,
					       const struct net_device *netdev,
					       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		dpaa2_switch_port_get_stats((struct net_device *)netdev, sp);
		return 0;
	}

	return -EINVAL;
}
static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpsw_if_set_max_frame_length(port_priv->ethsw_data->mc_io,
					   0,
					   port_priv->ethsw_data->dpsw_handle,
					   port_priv->idx,
					   (u16)ETHSW_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(netdev,
			   "dpsw_if_set_max_frame_length() err %d\n", err);
		return err;
	}

	netdev->mtu = mtu;
	return 0;
}
static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpsw_link_state state;
	int err;

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_switch_port_is_type_phy(port_priv))
		return 0;

	/* Interrupts are received even though no one issued an 'ifconfig up'
	 * on the switch interface. Ignore these link state update interrupts
	 */
	if (!netif_running(netdev))
		return 0;

	err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
				     port_priv->ethsw_data->dpsw_handle,
				     port_priv->idx, &state);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_link_state() err %d\n", err);
		return err;
	}

	WARN_ONCE(state.up > 1, "Garbage read into link_state");

	if (state.up != port_priv->link_state) {
		if (state.up) {
			netif_carrier_on(netdev);
			netif_tx_start_all_queues(netdev);
		} else {
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
		port_priv->link_state = state.up;
	}

	return 0;
}
/* Manage all NAPI instances for the control interface.
 *
 * We only have one RX queue and one Tx Conf queue for all
 * switch ports. Therefore, we only need to enable the NAPI instance once, the
 * first time one of the switch ports runs .dev_open().
 */
static void dpaa2_switch_enable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* a new interface is using the NAPI instance */
	ethsw->napi_users++;

	/* if there is already a user of the instance, return */
	if (ethsw->napi_users > 1)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_enable(&ethsw->fq[i].napi);
}

static void dpaa2_switch_disable_ctrl_if_napi(struct ethsw_core *ethsw)
{
	int i;

	/* Access to the ethsw->napi_users relies on the RTNL lock */
	ASSERT_RTNL();

	/* If we are not the last interface using the NAPI, return */
	ethsw->napi_users--;
	if (ethsw->napi_users)
		return;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		napi_disable(&ethsw->fq[i].napi);
}
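
/* The napi_users counter effectively reference-counts the shared NAPI
 * instances: napi_enable() runs only on the 0 -> 1 transition and
 * napi_disable() only on the 1 -> 0 transition, no matter how many switch
 * ports are opened or closed in between.
 */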
static int dpaa2_switch_port_open(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (!dpaa2_switch_port_is_type_phy(port_priv)) {
		/* Explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(netdev);
	}

	err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0,
			     port_priv->ethsw_data->dpsw_handle,
			     port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_enable err %d\n", err);
		return err;
	}

	dpaa2_switch_enable_ctrl_if_napi(ethsw);

	if (dpaa2_switch_port_is_type_phy(port_priv))
		phylink_start(port_priv->mac->phylink);

	return 0;
}
static int dpaa2_switch_port_stop(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		phylink_stop(port_priv->mac->phylink);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}

	err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
			      port_priv->ethsw_data->dpsw_handle,
			      port_priv->idx);
	if (err) {
		netdev_err(netdev, "dpsw_if_disable err %d\n", err);
		return err;
	}

	dpaa2_switch_disable_ctrl_if_napi(ethsw);

	return 0;
}
static int dpaa2_switch_port_parent_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct ethsw_port_priv *port_priv = netdev_priv(dev);

	ppid->id_len = 1;
	ppid->id[0] = port_priv->ethsw_data->dev_id;

	return 0;
}
static int dpaa2_switch_port_get_phys_name(struct net_device *netdev, char *name,
					   size_t len)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = snprintf(name, len, "p%d", port_priv->idx);
	if (err >= len)
		return -EINVAL;

	return 0;
}
struct ethsw_dump_ctx {
	struct net_device *dev;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
};
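
/* Context threaded through dpaa2_switch_fdb_iterate() while servicing
 * .ndo_fdb_dump: it carries the netlink dump state (skb, callback, current
 * index) from dpaa2_switch_port_fdb_dump() down to dpaa2_switch_fdb_dump_nl().
 */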
static int dpaa2_switch_fdb_dump_nl(struct fdb_dump_entry *entry,
				    struct ethsw_dump_ctx *dump)
{
	int is_dynamic = entry->type & DPSW_FDB_ENTRY_DINAMIC;
	u32 portid = NETLINK_CB(dump->cb->skb).portid;
	u32 seq = dump->cb->nlh->nlmsg_seq;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	if (dump->idx < dump->cb->args[2])
		goto skip;

	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
			sizeof(*ndm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family  = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags   = NTF_SELF;
	ndm->ndm_type    = 0;
	ndm->ndm_ifindex = dump->dev->ifindex;
	ndm->ndm_state   = is_dynamic ? NUD_REACHABLE : NUD_NOARP;

	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac_addr))
		goto nla_put_failure;

	nlmsg_end(dump->skb, nlh);

skip:
	dump->idx++;
	return 0;

nla_put_failure:
	nlmsg_cancel(dump->skb, nlh);
	return -EMSGSIZE;
}
static int dpaa2_switch_port_fdb_valid_entry(struct fdb_dump_entry *entry,
					     struct ethsw_port_priv *port_priv)
{
	int idx = port_priv->idx;
	int valid;

	if (entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		valid = entry->if_info == port_priv->idx;
	else
		valid = entry->if_mask[idx / 8] & BIT(idx % 8);

	return valid;
}
static int dpaa2_switch_fdb_iterate(struct ethsw_port_priv *port_priv,
				    dpaa2_switch_fdb_cb_t cb, void *data)
{
	struct net_device *net_dev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct device *dev = net_dev->dev.parent;
	struct fdb_dump_entry *fdb_entries;
	struct fdb_dump_entry fdb_entry;
	dma_addr_t fdb_dump_iova;
	u16 num_fdb_entries;
	u32 fdb_dump_size;
	int err = 0, i;
	u8 *dma_mem;
	u16 fdb_id;

	fdb_dump_size = ethsw->sw_attr.max_fdb_entries * sizeof(fdb_entry);
	dma_mem = kzalloc(fdb_dump_size, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	fdb_dump_iova = dma_map_single(dev, dma_mem, fdb_dump_size,
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, fdb_dump_iova)) {
		netdev_err(net_dev, "dma_map_single() failed\n");
		err = -ENOMEM;
		goto err_map;
	}

	fdb_id = dpaa2_switch_port_get_fdb_id(port_priv);
	err = dpsw_fdb_dump(ethsw->mc_io, 0, ethsw->dpsw_handle, fdb_id,
			    fdb_dump_iova, fdb_dump_size, &num_fdb_entries);
	if (err) {
		netdev_err(net_dev, "dpsw_fdb_dump() = %d\n", err);
		goto err_dump;
	}

	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);

	fdb_entries = (struct fdb_dump_entry *)dma_mem;
	for (i = 0; i < num_fdb_entries; i++) {
		fdb_entry = fdb_entries[i];

		err = cb(port_priv, &fdb_entry, data);
		if (err)
			goto end;
	}

end:
	kfree(dma_mem);

	return 0;

err_dump:
	dma_unmap_single(dev, fdb_dump_iova, fdb_dump_size, DMA_FROM_DEVICE);
err_map:
	kfree(dma_mem);
	return err;
}
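
/* The firmware writes the FDB dump into a DMA-able bounce buffer sized for
 * max_fdb_entries; each entry is copied to the stack before invoking the
 * callback so that the callback never touches live DMA memory.
 */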
static int dpaa2_switch_fdb_entry_dump(struct ethsw_port_priv *port_priv,
				       struct fdb_dump_entry *fdb_entry,
				       void *data)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	return dpaa2_switch_fdb_dump_nl(fdb_entry, data);
}
static int dpaa2_switch_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
				      struct net_device *net_dev,
				      struct net_device *filter_dev, int *idx)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_dump_ctx dump = {
		.dev = net_dev,
		.skb = skb,
		.cb = cb,
		.idx = *idx,
	};
	int err;

	err = dpaa2_switch_fdb_iterate(port_priv, dpaa2_switch_fdb_entry_dump, &dump);
	*idx = dump.idx;

	return err;
}
static int dpaa2_switch_fdb_entry_fast_age(struct ethsw_port_priv *port_priv,
					   struct fdb_dump_entry *fdb_entry,
					   void *data __always_unused)
{
	if (!dpaa2_switch_port_fdb_valid_entry(fdb_entry, port_priv))
		return 0;

	if (!(fdb_entry->type & DPSW_FDB_ENTRY_TYPE_DYNAMIC))
		return 0;

	if (fdb_entry->type & DPSW_FDB_ENTRY_TYPE_UNICAST)
		dpaa2_switch_port_fdb_del_uc(port_priv, fdb_entry->mac_addr);
	else
		dpaa2_switch_port_fdb_del_mc(port_priv, fdb_entry->mac_addr);

	return 0;
}
static void dpaa2_switch_port_fast_age(struct ethsw_port_priv *port_priv)
{
	dpaa2_switch_fdb_iterate(port_priv,
				 dpaa2_switch_fdb_entry_fast_age, NULL);
}
static int dpaa2_switch_port_vlan_add(struct net_device *netdev, __be16 proto,
				      u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_add(netdev, &vlan);
}
static int dpaa2_switch_port_vlan_kill(struct net_device *netdev, __be16 proto,
				       u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
		.obj.orig_dev = netdev,
		/* This API only allows programming tagged, non-PVID VIDs */
		.flags = 0,
	};

	return dpaa2_switch_port_vlans_del(netdev, &vlan);
}
static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *net_dev = port_priv->netdev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN];
	int err;

	if (!(ethsw->features & ETHSW_FEATURE_MAC_ADDR))
		return 0;

	/* Get firmware address, if any */
	err = dpsw_if_get_port_mac_addr(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, mac_addr);
	if (err) {
		dev_err(dev, "dpsw_if_get_port_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	}

	return 0;
}
static void dpaa2_switch_free_fd(const struct ethsw_core *ethsw,
				 const struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	unsigned char *buffer_start;
	struct sk_buff **skbh, *skb;
	dma_addr_t fd_addr;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(ethsw->iommu_domain, fd_addr);

	skb = *skbh;
	buffer_start = (unsigned char *)skbh;

	dma_unmap_single(dev, fd_addr,
			 skb_tail_pointer(skb) - buffer_start,
			 DMA_TO_DEVICE);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}
static int dpaa2_switch_build_single_fd(struct ethsw_core *ethsw,
					struct sk_buff *skb,
					struct dpaa2_fd *fd)
{
	struct device *dev = ethsw->dev;
	struct sk_buff **skbh;
	dma_addr_t addr;
	u8 *buff_start;
	void *hwa;

	buff_start = PTR_ALIGN(skb->data - DPAA2_SWITCH_TX_DATA_OFFSET -
			       DPAA2_SWITCH_TX_BUF_ALIGN,
			       DPAA2_SWITCH_TX_BUF_ALIGN);

	/* Clear FAS to have consistent values for TX confirmation. It is
	 * located in the first 8 bytes of the buffer's hardware annotation
	 * area
	 */
	hwa = buff_start + DPAA2_SWITCH_SWA_SIZE;
	memset(hwa, 0, 8);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buff_start;
	*skbh = skb;

	addr = dma_map_single(dev, buff_start,
			      skb_tail_pointer(skb) - buff_start,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buff_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);

	return 0;
}
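
/* Resulting Tx buffer layout (a sketch; everything lives within the
 * headroom guaranteed by DPAA2_SWITCH_NEEDED_HEADROOM):
 *
 *   buff_start                                         skb->data
 *   +-------------------+-------------------------------+------------+
 *   | skb backpointer   | HW annotation (FAS cleared)   | frame data |
 *   | (SWA area)        |                               |            |
 *   +-------------------+-------------------------------+------------+
 *   <------------ FD offset = skb->data - buff_start --->
 */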
static netdev_tx_t dpaa2_switch_port_tx(struct sk_buff *skb,
					struct net_device *net_dev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(net_dev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int retries = DPAA2_SWITCH_SWP_BUSY_RETRIES;
	struct dpaa2_fd fd;
	int err;

	if (unlikely(skb_headroom(skb) < DPAA2_SWITCH_NEEDED_HEADROOM)) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_SWITCH_NEEDED_HEADROOM);
		if (unlikely(!ns)) {
			net_err_ratelimited("%s: Error reallocating skb headroom\n", net_dev->name);
			goto err_free_skb;
		}
		dev_consume_skb_any(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx confirmation */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		net_err_ratelimited("%s: Error copying the socket buffer\n", net_dev->name);
		goto err_exit;
	}

	/* At this stage, we do not support non-linear skbs so just try to
	 * linearize the skb and if that's not working, just drop the packet.
	 */
	err = skb_linearize(skb);
	if (err) {
		net_err_ratelimited("%s: skb_linearize error (%d)!\n", net_dev->name, err);
		goto err_free_skb;
	}

	err = dpaa2_switch_build_single_fd(ethsw, skb, &fd);
	if (unlikely(err)) {
		net_err_ratelimited("%s: ethsw_build_*_fd() %d\n", net_dev->name, err);
		goto err_free_skb;
	}

	do {
		err = dpaa2_io_service_enqueue_qd(NULL,
						  port_priv->tx_qdid,
						  8, 0, &fd);
		retries--;
	} while (err == -EBUSY && retries);

	if (unlikely(err < 0)) {
		dpaa2_switch_free_fd(ethsw, &fd);
		goto err_exit;
	}

	return NETDEV_TX_OK;

err_free_skb:
	dev_kfree_skb(skb);
err_exit:
	return NETDEV_TX_OK;
}
static int
dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block,
				 struct flow_cls_offload *f)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return dpaa2_switch_cls_flower_replace(filter_block, f);
	case FLOW_CLS_DESTROY:
		return dpaa2_switch_cls_flower_destroy(filter_block, f);
	default:
		return -EOPNOTSUPP;
	}
}
static int
dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block,
				   struct tc_cls_matchall_offload *f)
{
	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return dpaa2_switch_cls_matchall_replace(block, f);
	case TC_CLSMATCHALL_DESTROY:
		return dpaa2_switch_cls_matchall_destroy(block, f);
	default:
		return -EOPNOTSUPP;
	}
}
static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type,
						  void *type_data,
						  void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return dpaa2_switch_setup_tc_cls_flower(cb_priv, type_data);
	case TC_SETUP_CLSMATCHALL:
		return dpaa2_switch_setup_tc_cls_matchall(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(dpaa2_switch_block_cb_list);
static int
dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
			      block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add_if err %d\n", err);
		return err;
	}

	block->ports |= BIT(port_priv->idx);
	port_priv->filter_block = block;

	return 0;
}
static int
dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv,
				 struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_acl_if_cfg acl_if_cfg;
	int err;

	if (port_priv->filter_block != block)
		return -EINVAL;

	acl_if_cfg.if_id[0] = port_priv->idx;
	acl_if_cfg.num_ifs = 1;
	err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
				 block->acl_id, &acl_if_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_remove_if err %d\n", err);
		return err;
	}

	block->ports &= ~BIT(port_priv->idx);
	port_priv->filter_block = NULL;

	return 0;
}
static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv,
					struct dpaa2_switch_filter_block *block)
{
	struct dpaa2_switch_filter_block *old_block = port_priv->filter_block;
	int err;

	/* Offload all the mirror entries found in the block on this new port
	 * joining it.
	 */
	err = dpaa2_switch_block_offload_mirror(block, port_priv);
	if (err)
		return err;

	/* If the port is already bound to this ACL table then do nothing. This
	 * can happen when this port is the first one to join a tc block
	 */
	if (port_priv->filter_block == block)
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block);
	if (err)
		return err;

	/* Mark the previous ACL table as being unused if this was the last
	 * port that was using it.
	 */
	if (old_block->ports == 0)
		old_block->in_use = false;

	return dpaa2_switch_port_acl_tbl_bind(port_priv, block);
}
static int
dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv,
			       struct dpaa2_switch_filter_block *block)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *new_block;
	int err;

	/* Unoffload all the mirror entries found in the block from the
	 * port leaving it.
	 */
	err = dpaa2_switch_block_unoffload_mirror(block, port_priv);
	if (err)
		return err;

	/* We are the last port that leaves a block (an ACL table).
	 * We'll continue to use this table.
	 */
	if (block->ports == BIT(port_priv->idx))
		return 0;

	err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block);
	if (err)
		return err;

	if (block->ports == 0)
		block->in_use = false;

	new_block = dpaa2_switch_filter_block_get_unused(ethsw);
	new_block->in_use = true;
	return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
}
static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev,
					    struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);

	if (!block_cb) {
		/* If the filter block is not already known, then this port
		 * must be the first to join it. In this case, we can just
		 * continue to use our private table
		 */
		filter_block = port_priv->filter_block;

		block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig,
					       ethsw, filter_block, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		register_block = true;
	} else {
		filter_block = flow_block_cb_priv(block_cb);
	}

	flow_block_cb_incref(block_cb);
	err = dpaa2_switch_port_block_bind(port_priv, filter_block);
	if (err)
		goto err_block_bind;

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list,
			      &dpaa2_switch_block_cb_list);
	}

	return 0;

err_block_bind:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}
static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev,
					       struct flow_block_offload *f)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct flow_block_cb *block_cb;
	int err;

	block_cb = flow_block_cb_lookup(f->block,
					dpaa2_switch_port_setup_tc_block_cb_ig,
					ethsw);
	if (!block_cb)
		return;

	filter_block = flow_block_cb_priv(block_cb);
	err = dpaa2_switch_port_block_unbind(port_priv, filter_block);
	if (!err && !flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}
static int dpaa2_switch_setup_tc_block(struct net_device *netdev,
				       struct flow_block_offload *f)
{
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &dpaa2_switch_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return dpaa2_switch_setup_tc_block_bind(netdev, f);
	case FLOW_BLOCK_UNBIND:
		dpaa2_switch_setup_tc_block_unbind(netdev, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int dpaa2_switch_port_setup_tc(struct net_device *netdev,
				      enum tc_setup_type type,
				      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK: {
		return dpaa2_switch_setup_tc_block(netdev, type_data);
	}
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static const struct net_device_ops dpaa2_switch_port_ops = {
	.ndo_open		= dpaa2_switch_port_open,
	.ndo_stop		= dpaa2_switch_port_stop,

	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= dpaa2_switch_port_get_stats,
	.ndo_change_mtu		= dpaa2_switch_port_change_mtu,
	.ndo_has_offload_stats	= dpaa2_switch_port_has_offload_stats,
	.ndo_get_offload_stats	= dpaa2_switch_port_get_offload_stats,
	.ndo_fdb_dump		= dpaa2_switch_port_fdb_dump,
	.ndo_vlan_rx_add_vid	= dpaa2_switch_port_vlan_add,
	.ndo_vlan_rx_kill_vid	= dpaa2_switch_port_vlan_kill,

	.ndo_start_xmit		= dpaa2_switch_port_tx,
	.ndo_get_port_parent_id	= dpaa2_switch_port_parent_id,
	.ndo_get_phys_port_name = dpaa2_switch_port_get_phys_name,
	.ndo_setup_tc		= dpaa2_switch_port_setup_tc,
};
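
/* These ndos are exercised through the standard tooling, e.g. (the port
 * name "swp0" is illustrative):
 *
 *	ip link set dev swp0 up       exercises .ndo_open
 *	bridge fdb show dev swp0      exercises .ndo_fdb_dump
 *	tc qdisc add dev swp0 clsact  exercises .ndo_setup_tc
 */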
bool dpaa2_switch_port_dev_check(const struct net_device *netdev)
{
	return netdev->netdev_ops == &dpaa2_switch_port_ops;
}
static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
{
	struct fsl_mc_device *dpsw_port_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx);

	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
		return PTR_ERR(dpmac_dev);

	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = port_priv->ethsw_data->mc_io;
	mac->net_dev = port_priv->netdev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;
	port_priv->mac = mac;

	if (dpaa2_switch_port_is_type_phy(port_priv)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			netdev_err(port_priv->netdev,
				   "Error connecting to the MAC endpoint %pe\n",
				   ERR_PTR(err));
			goto err_close_mac;
		}
	}

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
	port_priv->mac = NULL;
err_free_mac:
	kfree(mac);
	return err;
}
static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
{
	if (dpaa2_switch_port_is_type_phy(port_priv))
		dpaa2_mac_disconnect(port_priv->mac);

	if (!dpaa2_switch_port_has_mac(port_priv))
		return;

	dpaa2_mac_close(port_priv->mac);
	kfree(port_priv->mac);
	port_priv->mac = NULL;
}
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct ethsw_port_priv *port_priv;
	u32 status = ~0;
	int err, if_id;

	err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, &status);
	if (err) {
		dev_err(dev, "Can't get irq status (err %d)\n", err);
		goto out;
	}

	if_id = (status & 0xFFFF0000) >> 16;
	port_priv = ethsw->ports[if_id];

	if (status & DPSW_IRQ_EVENT_LINK_CHANGED) {
		dpaa2_switch_port_link_state_update(port_priv->netdev);
		dpaa2_switch_port_set_mac_addr(port_priv);
	}

	if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
		if (dpaa2_switch_port_has_mac(port_priv))
			dpaa2_switch_port_disconnect_mac(port_priv);
		else
			dpaa2_switch_port_connect_mac(port_priv);
	}

out:
	err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
				    DPSW_IRQ_INDEX_IF, status);
	if (err)
		dev_err(dev, "Can't clear irq status (err %d)\n", err);

	return IRQ_HANDLED;
}
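
/* The DPSW_IRQ_INDEX_IF status word encodes the originating interface in
 * its upper 16 bits, which is how the handler above maps the interrupt
 * back to a specific ethsw->ports[] entry.
 */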
static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	u32 mask = DPSW_IRQ_EVENT_LINK_CHANGED;
	struct fsl_mc_device_irq *irq;
	int err;

	err = fsl_mc_allocate_irqs(sw_dev);
	if (err) {
		dev_err(dev, "MC irqs allocation failed\n");
		return err;
	}

	if (WARN_ON(sw_dev->obj_desc.irq_count != DPSW_IRQ_NUM)) {
		err = -EINVAL;
		goto free_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);
		goto free_irq;
	}

	irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];

	err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
					NULL,
					dpaa2_switch_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(dev), dev);
	if (err) {
		dev_err(dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_irq;
	}

	err = dpsw_set_irq_mask(ethsw->mc_io, 0, ethsw->dpsw_handle,
				DPSW_IRQ_INDEX_IF, mask);
	if (err) {
		dev_err(dev, "dpsw_set_irq_mask(): %d\n", err);
		goto free_devm_irq;
	}

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 1);
	if (err) {
		dev_err(dev, "dpsw_set_irq_enable(): %d\n", err);
		goto free_devm_irq;
	}

	return 0;

free_devm_irq:
	devm_free_irq(dev, irq->msi_desc->irq, dev);
free_irq:
	fsl_mc_free_irqs(sw_dev);
	return err;
}
static void dpaa2_switch_teardown_irqs(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	err = dpsw_set_irq_enable(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  DPSW_IRQ_INDEX_IF, 0);
	if (err)
		dev_err(dev, "dpsw_set_irq_enable err %d\n", err);

	fsl_mc_free_irqs(sw_dev);
}
static int dpaa2_switch_port_set_learning(struct ethsw_port_priv *port_priv, bool enable)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	enum dpsw_learning_mode learn_mode;
	int err;

	if (enable)
		learn_mode = DPSW_LEARNING_MODE_HW;
	else
		learn_mode = DPSW_LEARNING_MODE_DIS;

	err = dpsw_if_set_learning_mode(ethsw->mc_io, 0, ethsw->dpsw_handle,
					port_priv->idx, learn_mode);
	if (err)
		netdev_err(port_priv->netdev, "dpsw_if_set_learning_mode err %d\n", err);

	if (!enable)
		dpaa2_switch_port_fast_age(port_priv);

	return err;
}
static int dpaa2_switch_port_attr_stp_state_set(struct net_device *netdev,
						u8 state)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	err = dpaa2_switch_port_set_stp_state(port_priv, state);
	if (err)
		return err;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		err = dpaa2_switch_port_set_learning(port_priv, false);
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		err = dpaa2_switch_port_set_learning(port_priv,
						     port_priv->learn_ena);
		break;
	}

	return err;
}
static int dpaa2_switch_port_flood(struct ethsw_port_priv *port_priv,
				   struct switchdev_brport_flags flags)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (flags.mask & BR_BCAST_FLOOD)
		port_priv->bcast_flood = !!(flags.val & BR_BCAST_FLOOD);

	if (flags.mask & BR_FLOOD)
		port_priv->ucast_flood = !!(flags.val & BR_FLOOD);

	return dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
}
static int dpaa2_switch_port_pre_bridge_flags(struct net_device *netdev,
					      struct switchdev_brport_flags flags,
					      struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_BCAST_FLOOD | BR_FLOOD |
			   BR_MCAST_FLOOD))
		return -EINVAL;

	if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD)) {
		bool multicast = !!(flags.val & BR_MCAST_FLOOD);
		bool unicast = !!(flags.val & BR_FLOOD);

		if (unicast != multicast) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Cannot configure multicast flooding independently of unicast");
			return -EINVAL;
		}
	}

	return 0;
}
static int dpaa2_switch_port_bridge_flags(struct net_device *netdev,
					  struct switchdev_brport_flags flags,
					  struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (flags.mask & BR_LEARNING) {
		bool learn_ena = !!(flags.val & BR_LEARNING);

		err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
		if (err)
			return err;
		port_priv->learn_ena = learn_ena;
	}

	if (flags.mask & (BR_BCAST_FLOOD | BR_FLOOD | BR_MCAST_FLOOD)) {
		err = dpaa2_switch_port_flood(port_priv, flags);
		if (err)
			return err;
	}

	return 0;
}
static int dpaa2_switch_port_attr_set(struct net_device *netdev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = dpaa2_switch_port_attr_stp_state_set(netdev,
							   attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		if (!attr->u.vlan_filtering) {
			NL_SET_ERR_MSG_MOD(extack,
					   "The DPAA2 switch does not support VLAN-unaware operation");
			return -EOPNOTSUPP;
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = dpaa2_switch_port_pre_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = dpaa2_switch_port_bridge_flags(netdev, attr->u.brport_flags, extack);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpsw_attr *attr = &ethsw->sw_attr;
	int err = 0;

	/* Make sure that the VLAN is not already configured
	 * on the switch port
	 */
	if (port_priv->vlans[vlan->vid] & ETHSW_VLAN_MEMBER)
		return -EEXIST;

	/* Check if there is space for a new VLAN */
	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		netdev_err(netdev, "dpsw_get_attributes err %d\n", err);
		return err;
	}
	if (attr->max_vlans - attr->num_vlans < 1)
		return -ENOSPC;

	if (!port_priv->ethsw_data->vlans[vlan->vid]) {
		/* this is a new VLAN */
		err = dpaa2_switch_add_vlan(port_priv, vlan->vid);
		if (err)
			return err;

		port_priv->ethsw_data->vlans[vlan->vid] |= ETHSW_VLAN_GLOBAL;
	}

	return dpaa2_switch_port_add_vlan(port_priv, vlan->vid, vlan->flags);
}
static int dpaa2_switch_port_lookup_address(struct net_device *netdev, int is_uc,
					    const unsigned char *addr)
{
	struct netdev_hw_addr_list *list = (is_uc) ? &netdev->uc : &netdev->mc;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);
	list_for_each_entry(ha, &list->list, list) {
		if (ether_addr_equal(ha->addr, addr)) {
			netif_addr_unlock_bh(netdev);
			return 1;
		}
	}
	netif_addr_unlock_bh(netdev);
	return 0;
}
static int dpaa2_switch_port_mdb_add(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	/* Check if address is already set on this port */
	if (dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -EEXIST;

	err = dpaa2_switch_port_fdb_add_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_add(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_add err %d\n", err);
		dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	}

	return err;
}
static int dpaa2_switch_port_obj_add(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_add(netdev,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_add(netdev,
						SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid)
{
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct net_device *netdev = port_priv->netdev;
	struct dpsw_vlan_if_cfg vcfg;
	int i, err;

	if (!port_priv->vlans[vid])
		return -ENOENT;

	if (port_priv->vlans[vid] & ETHSW_VLAN_PVID) {
		/* If we are deleting the PVID of a port, use VLAN 4095 instead
		 * as we are sure that neither the bridge nor the 8021q module
		 * will use it
		 */
		err = dpaa2_switch_port_set_pvid(port_priv, 4095);
		if (err)
			return err;
	}

	vcfg.num_ifs = 1;
	vcfg.if_id[0] = port_priv->idx;
	if (port_priv->vlans[vid] & ETHSW_VLAN_UNTAGGED) {
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0,
						   ethsw->dpsw_handle,
						   vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if_untagged err %d\n",
				   err);
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_UNTAGGED;
	}

	if (port_priv->vlans[vid] & ETHSW_VLAN_MEMBER) {
		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  vid, &vcfg);
		if (err) {
			netdev_err(netdev,
				   "dpsw_vlan_remove_if err %d\n", err);
			return err;
		}
		port_priv->vlans[vid] &= ~ETHSW_VLAN_MEMBER;

		/* Delete VLAN from switch if it is no longer configured on
		 * any port
		 */
		for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
			if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
				return 0; /* Found a port member in VID */

		ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;

		err = dpaa2_switch_dellink(ethsw, vid);
		if (err)
			return err;
	}

	return 0;
}
int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);

	if (netif_is_bridge_master(vlan->obj.orig_dev))
		return -EOPNOTSUPP;

	return dpaa2_switch_port_del_vlan(port_priv, vlan->vid);
}
static int dpaa2_switch_port_mdb_del(struct net_device *netdev,
				     const struct switchdev_obj_port_mdb *mdb)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	int err;

	if (!dpaa2_switch_port_lookup_address(netdev, 0, mdb->addr))
		return -ENOENT;

	err = dpaa2_switch_port_fdb_del_mc(port_priv, mdb->addr);
	if (err)
		return err;

	err = dev_mc_del(netdev, mdb->addr);
	if (err) {
		netdev_err(netdev, "dev_mc_del err %d\n", err);
		return err;
	}

	return err;
}
static int dpaa2_switch_port_obj_del(struct net_device *netdev,
				     const struct switchdev_obj *obj)
{
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = dpaa2_switch_port_vlans_del(netdev, SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = dpaa2_switch_port_mdb_del(netdev, SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
static int dpaa2_switch_port_attr_set_event(struct net_device *netdev,
					    struct switchdev_notifier_port_attr_info *ptr)
{
	int err;

	err = switchdev_handle_port_attr_set(netdev, ptr,
					     dpaa2_switch_port_dev_check,
					     dpaa2_switch_port_attr_set);
	return notifier_from_errno(err);
}

static struct notifier_block dpaa2_switch_port_switchdev_nb;
static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb;
static int dpaa2_switch_port_bridge_join(struct net_device *netdev,
					 struct net_device *upper_dev,
					 struct netlink_ext_ack *extack)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct ethsw_port_priv *other_port_priv;
	struct net_device *other_dev;
	struct list_head *iter;
	bool learn_ena;
	int err;

	netdev_for_each_lower_dev(upper_dev, other_dev, iter) {
		if (!dpaa2_switch_port_dev_check(other_dev))
			continue;

		other_port_priv = netdev_priv(other_dev);
		if (other_port_priv->ethsw_data != port_priv->ethsw_data) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interface from a different DPSW is in the bridge already");
			return -EINVAL;
		}
	}

	/* Delete the previously manually installed VLAN 1 */
	err = dpaa2_switch_port_del_vlan(port_priv, 1);
	if (err)
		return err;

	dpaa2_switch_port_set_fdb(port_priv, upper_dev);

	/* Inherit the initial bridge port learning state */
	learn_ena = br_port_flag_is_set(netdev, BR_LEARNING);
	err = dpaa2_switch_port_set_learning(port_priv, learn_ena);
	port_priv->learn_ena = learn_ena;

	/* Setup the egress flood policy (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		goto err_egress_flood;

	err = switchdev_bridge_port_offload(netdev, netdev, NULL,
					    &dpaa2_switch_port_switchdev_nb,
					    &dpaa2_switch_port_switchdev_blocking_nb,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	return 0;

err_switchdev_offload:
err_egress_flood:
	dpaa2_switch_port_set_fdb(port_priv, NULL);
	return err;
}
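
/* A typical sequence that reaches this path (device names are illustrative;
 * note that only VLAN-aware bridges pass the prechangeupper sanity checks
 * further below):
 *
 *	ip link add br0 type bridge vlan_filtering 1
 *	ip link set dev swp0 master br0
 */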
static int dpaa2_switch_port_clear_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_kill(arg, vlan_proto, vid);
}

static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, void *arg)
{
	__be16 vlan_proto = htons(ETH_P_8021Q);

	if (vdev)
		vlan_proto = vlan_dev_vlan_proto(vdev);

	return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid);
}
static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev)
{
	switchdev_bridge_port_unoffload(netdev, NULL,
					&dpaa2_switch_port_switchdev_nb,
					&dpaa2_switch_port_switchdev_blocking_nb);
}
static int dpaa2_switch_port_bridge_leave(struct net_device *netdev)
{
	struct ethsw_port_priv *port_priv = netdev_priv(netdev);
	struct dpaa2_switch_fdb *old_fdb = port_priv->fdb;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	int err;

	/* First of all, fast age any learned FDB addresses on this switch port */
	dpaa2_switch_port_fast_age(port_priv);

	/* Clear all RX VLANs installed through vlan_vid_add() either as VLAN
	 * upper devices or otherwise from the FDB table that we are about to
	 * leave
	 */
	err = vlan_for_each(netdev, dpaa2_switch_port_clear_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to clear RX VLANs from old FDB table, err (%d)\n", err);

	dpaa2_switch_port_set_fdb(port_priv, NULL);

	/* Restore all RX VLANs into the new FDB table that we just joined */
	err = vlan_for_each(netdev, dpaa2_switch_port_restore_rxvlan, netdev);
	if (err)
		netdev_err(netdev, "Unable to restore RX VLANs to the new FDB, err (%d)\n", err);

	/* Reset the flooding state to denote that this port can send any
	 * packet in standalone mode. With this, we are also ensuring that any
	 * later bridge join will have the flooding flag on.
	 */
	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Setup the egress flood policy (broadcast, unknown unicast).
	 * When the port is not under a bridge, only the CTRL interface is part
	 * of the flooding domain besides the actual port
	 */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Recreate the egress flood domain of the FDB that we just left */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, old_fdb->fdb_id);
	if (err)
		return err;

	/* No HW learning when not under a bridge */
	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		return err;
	port_priv->learn_ena = false;

	/* Add the VLAN 1 as PVID when not under a bridge. We need this since
	 * the dpaa2 switch interfaces are not capable to be VLAN unaware
	 */
	return dpaa2_switch_port_add_vlan(port_priv, DEFAULT_VLAN_ID,
					  BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID);
}
static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *netdev)
{
	struct net_device *upper_dev;
	struct list_head *iter;

	/* RCU read lock not necessary because we have write-side protection
	 * (rtnl_mutex), however a non-rcu iterator does not exist.
	 */
	netdev_for_each_upper_dev_rcu(netdev, upper_dev, iter)
		if (is_vlan_dev(upper_dev))
			return -EOPNOTSUPP;

	return 0;
}
static int
dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev,
					  struct net_device *upper_dev,
					  struct netlink_ext_ack *extack)
{
	int err;

	if (!br_vlan_enabled(upper_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge");
		return -EOPNOTSUPP;
	}

	err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot join a bridge while VLAN uppers are present");
		return err;
	}

	return 0;
}
static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			break;

		err = dpaa2_switch_prechangeupper_sanity_checks(netdev,
								upper_dev,
								extack);
		if (err)
			break;

		if (!info->linking)
			dpaa2_switch_port_pre_bridge_leave(netdev);

		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking)
				err = dpaa2_switch_port_bridge_join(netdev,
								    upper_dev,
								    extack);
			else
				err = dpaa2_switch_port_bridge_leave(netdev);
		}
		break;
	}

	return notifier_from_errno(err);
}
struct ethsw_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};
static void dpaa2_switch_event_work(struct work_struct *work)
{
	struct ethsw_switchdev_event_work *switchdev_work =
		container_of(work, struct ethsw_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	int err;

	rtnl_lock();
	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			err = dpaa2_switch_port_fdb_add_uc(netdev_priv(dev),
							   fdb_info->addr);
		else
			err = dpaa2_switch_port_fdb_add_mc(netdev_priv(dev),
							   fdb_info->addr);
		if (err)
			break;
		fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev,
					 &fdb_info->info, NULL);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		if (is_unicast_ether_addr(fdb_info->addr))
			dpaa2_switch_port_fdb_del_uc(netdev_priv(dev), fdb_info->addr);
		else
			dpaa2_switch_port_fdb_del_mc(netdev_priv(dev), fdb_info->addr);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
/* Called under rcu_read_lock() */
static int dpaa2_switch_port_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct ethsw_port_priv *port_priv = netdev_priv(dev);
	struct ethsw_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info = ptr;
	struct ethsw_core *ethsw = port_priv->ethsw_data;

	if (event == SWITCHDEV_PORT_ATTR_SET)
		return dpaa2_switch_port_attr_set_event(dev, ptr);

	if (!dpaa2_switch_port_dev_check(dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&switchdev_work->work, dpaa2_switch_event_work);
	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);

		/* Take a reference on the device to avoid being freed. */
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	queue_work(ethsw->workqueue, &switchdev_work->work);

	return NOTIFY_DONE;

err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
static int dpaa2_switch_port_obj_event(unsigned long event,
				       struct net_device *netdev,
				       struct switchdev_notifier_port_obj_info *port_obj_info)
{
	int err = -EOPNOTSUPP;

	if (!dpaa2_switch_port_dev_check(netdev))
		return NOTIFY_DONE;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = dpaa2_switch_port_obj_add(netdev, port_obj_info->obj);
		break;
	case SWITCHDEV_PORT_OBJ_DEL:
		err = dpaa2_switch_port_obj_del(netdev, port_obj_info->obj);
		break;
	}

	port_obj_info->handled = true;
	return notifier_from_errno(err);
}
static int dpaa2_switch_port_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		return dpaa2_switch_port_obj_event(event, dev, ptr);
	case SWITCHDEV_PORT_ATTR_SET:
		return dpaa2_switch_port_attr_set_event(dev, ptr);
	}

	return NOTIFY_DONE;
}
/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_switch_build_linear_skb(struct ethsw_core *ethsw,
						     const struct dpaa2_fd *fd)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct device *dev = ethsw->dev;
	struct sk_buff *skb = NULL;
	void *fd_vaddr;

	fd_vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, addr);
	dma_unmap_page(dev, addr, DPAA2_SWITCH_RX_BUF_SIZE,
		       DMA_FROM_DEVICE);

	skb = build_skb(fd_vaddr, DPAA2_SWITCH_RX_BUF_SIZE +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (unlikely(!skb)) {
		dev_err(dev, "build_skb() failed\n");
		return NULL;
	}

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}
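
/* Each Rx buffer is a full page mapped DMA_FROM_DEVICE; the frame payload
 * starts at the offset reported in the FD, and enough tailroom is left for
 * the struct skb_shared_info that build_skb() places at the end.
 */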
static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,
				 const struct dpaa2_fd *fd)
{
	dpaa2_switch_free_fd(fq->ethsw, fd);
}
static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,
			    const struct dpaa2_fd *fd)
{
	struct ethsw_core *ethsw = fq->ethsw;
	struct ethsw_port_priv *port_priv;
	struct net_device *netdev;
	struct vlan_ethhdr *hdr;
	struct sk_buff *skb;
	u16 vlan_tci, vid;
	int if_id, err;

	/* get switch ingress interface ID */
	if_id = upper_32_bits(dpaa2_fd_get_flc(fd)) & 0x0000FFFF;

	if (if_id >= ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "Frame received from unknown interface!\n");
		goto err_free_fd;
	}
	port_priv = ethsw->ports[if_id];
	netdev = port_priv->netdev;

	/* build the SKB based on the FD received */
	if (dpaa2_fd_get_format(fd) != dpaa2_fd_single) {
		if (net_ratelimit()) {
			netdev_err(netdev, "Received invalid frame format\n");
			goto err_free_fd;
		}
	}

	skb = dpaa2_switch_build_linear_skb(ethsw, fd);
	if (unlikely(!skb))
		goto err_free_fd;

	skb_reset_mac_header(skb);

	/* Remove the VLAN header if the packet that we just received has a vid
	 * equal to the port PVIDs. Since the dpaa2-switch can operate only in
	 * VLAN-aware mode and no alterations are made on the packet when it's
	 * redirected/mirrored to the control interface, we are sure that there
	 * will always be a VLAN header present.
	 */
	hdr = vlan_eth_hdr(skb);
	vid = ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK;
	if (vid == port_priv->pvid) {
		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err) {
			dev_info(ethsw->dev, "__skb_vlan_pop() returned %d", err);
			goto err_free_fd;
		}
	}

	skb->dev = netdev;
	skb->protocol = eth_type_trans(skb, skb->dev);

	/* Setup the offload_fwd_mark only if the port is under a bridge */
	skb->offload_fwd_mark = !!(port_priv->fdb->bridge_dev);

	netif_receive_skb(skb);

	return;

err_free_fd:
	dpaa2_switch_free_fd(ethsw, fd);
}
static void dpaa2_switch_detect_features(struct ethsw_core *ethsw)
{
	ethsw->features = 0;

	if (ethsw->major > 8 || (ethsw->major == 8 && ethsw->minor >= 6))
		ethsw->features |= ETHSW_FEATURE_MAC_ADDR;
}

static int dpaa2_switch_setup_fqs(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_attr ctrl_if_attr;
	struct device *dev = ethsw->dev;
	int i = 0;
	int err;

	err = dpsw_ctrl_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  &ctrl_if_attr);
	if (err) {
		dev_err(dev, "dpsw_ctrl_if_get_attributes() = %d\n", err);
		return err;
	}

	ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;
	ethsw->fq[i].ethsw = ethsw;
	ethsw->fq[i++].type = DPSW_QUEUE_RX;

	ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;
	ethsw->fq[i].ethsw = ethsw;
	ethsw->fq[i++].type = DPSW_QUEUE_TX_ERR_CONF;

	return 0;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_switch_free_bufs(struct ethsw_core *ethsw, u64 *buf_array, int count)
{
	struct device *dev = ethsw->dev;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(ethsw->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], DPAA2_SWITCH_RX_BUF_SIZE,
			       DMA_FROM_DEVICE);
		free_pages((unsigned long)vaddr, 0);
	}
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_switch_add_bufs(struct ethsw_core *ethsw, u16 bpid)
{
	struct device *dev = ethsw->dev;
	u64 buf_array[BUFS_PER_CMD];
	struct page *page;
	int retries = 0;
	dma_addr_t addr;
	int err;
	int i;

	for (i = 0; i < BUFS_PER_CMD; i++) {
		/* Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page) {
			dev_err(dev, "buffer allocation failed\n");
			goto err_alloc;
		}

		addr = dma_map_page(dev, page, 0, DPAA2_SWITCH_RX_BUF_SIZE,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, addr)) {
			dev_err(dev, "dma_map_single() failed\n");
			goto err_map;
		}
		buf_array[i] = addr;
	}

release_bufs:
	/* In case the portal is busy, retry until successful or
	 * max retries hit.
	 */
	while ((err = dpaa2_io_service_release(NULL, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES)
			break;

		cpu_relax();
	}

	/* If release command failed, clean up and bail out. */
	if (err) {
		dpaa2_switch_free_bufs(ethsw, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

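/* dpaa2_switch_add_bufs() returns how many buffers were actually handed to
 * hardware (0 on error), so the callers below simply add its return value to
 * ethsw->buf_count to keep the software view of the pool level in sync.
 */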
static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
{
	int *count = &ethsw->buf_count;
	int new_count;
	int err = 0;

	if (unlikely(*count < DPAA2_ETHSW_REFILL_THRESH)) {
		do {
			new_count = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
			if (unlikely(!new_count)) {
				/* Out of memory; abort for now, we'll
				 * try later on
				 */
				break;
			}
			*count += new_count;
		} while (*count < DPAA2_ETHSW_NUM_BUFS);

		if (unlikely(*count < DPAA2_ETHSW_NUM_BUFS))
			err = -ENOMEM;
	}

	return err;
}

static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
	int *count, i;

	for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
		count = &ethsw->buf_count;
		*count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);

		if (unlikely(*count < BUFS_PER_CMD))
			return -ENOMEM;
	}

	return 0;
}

static void dpaa2_switch_drain_bp(struct ethsw_core *ethsw)
{
	u64 buf_array[BUFS_PER_CMD];
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, ethsw->bpid,
					       buf_array, BUFS_PER_CMD);
		if (ret < 0) {
			dev_err(ethsw->dev,
				"dpaa2_io_service_acquire() = %d\n", ret);
			return;
		}
		dpaa2_switch_free_bufs(ethsw, buf_array, ret);

	} while (ret);
}

static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_pools_cfg dpsw_ctrl_if_pools_cfg = { 0 };
	struct device *dev = ethsw->dev;
	struct fsl_mc_device *dpbp_dev;
	struct dpbp_attr dpbp_attrs;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}
	ethsw->dpbp_dev = dpbp_dev;

	err = dpbp_open(ethsw->mc_io, 0, dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(ethsw->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	dpsw_ctrl_if_pools_cfg.num_dpbp = 1;
	dpsw_ctrl_if_pools_cfg.pools[0].dpbp_id = dpbp_attrs.id;
	dpsw_ctrl_if_pools_cfg.pools[0].buffer_size = DPAA2_SWITCH_RX_BUF_SIZE;
	dpsw_ctrl_if_pools_cfg.pools[0].backup_pool = 0;

	err = dpsw_ctrl_if_set_pools(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     &dpsw_ctrl_if_pools_cfg);
	if (err) {
		dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
		goto err_get_attr;
	}
	ethsw->bpid = dpbp_attrs.id;

	return 0;

err_get_attr:
	dpbp_disable(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(ethsw->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);
	return err;
}

static void dpaa2_switch_free_dpbp(struct ethsw_core *ethsw)
{
	dpbp_disable(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	dpbp_close(ethsw->mc_io, 0, ethsw->dpbp_dev->mc_handle);
	fsl_mc_object_free(ethsw->dpbp_dev);
}

static int dpaa2_switch_alloc_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		ethsw->fq[i].store =
			dpaa2_io_store_create(DPAA2_SWITCH_STORE_SIZE,
					      ethsw->dev);
		if (!ethsw->fq[i].store) {
			dev_err(ethsw->dev, "dpaa2_io_store_create failed\n");
			while (--i >= 0)
				dpaa2_io_store_destroy(ethsw->fq[i].store);
			return -ENOMEM;
		}
	}

	return 0;
}

static void dpaa2_switch_destroy_rings(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_store_destroy(ethsw->fq[i].store);
}

static int dpaa2_switch_pull_fq(struct dpaa2_switch_fq *fq)
{
	int err, retries = 0;

	/* Try to pull from the FQ while the portal is busy and we didn't hit
	 * the maximum number of retries
	 */
	do {
		err = dpaa2_io_service_pull_fq(NULL, fq->fqid, fq->store);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	if (unlikely(err))
		dev_err(fq->ethsw->dev, "dpaa2_io_service_pull err %d", err);

	return err;
}

/* Consume all frames pull-dequeued into the store */
static int dpaa2_switch_store_consume(struct dpaa2_switch_fq *fq)
{
	struct ethsw_core *ethsw = fq->ethsw;
	int cleaned = 0, is_last;
	struct dpaa2_dq *dq;
	int retries = 0;

	do {
		/* Get the next available FD from the store */
		dq = dpaa2_io_store_next(fq->store, &is_last);
		if (unlikely(!dq)) {
			if (retries++ >= DPAA2_SWITCH_SWP_BUSY_RETRIES) {
				dev_err_once(ethsw->dev,
					     "No valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		if (fq->type == DPSW_QUEUE_RX)
			dpaa2_switch_rx(fq, dpaa2_dq_fd(dq));
		else
			dpaa2_switch_tx_conf(fq, dpaa2_dq_fd(dq));
		cleaned++;

	} while (!is_last);

	return cleaned;
}

/* NAPI poll routine */
static int dpaa2_switch_poll(struct napi_struct *napi, int budget)
{
	int err, cleaned = 0, store_cleaned, work_done;
	struct dpaa2_switch_fq *fq;
	int retries = 0;

	fq = container_of(napi, struct dpaa2_switch_fq, napi);

	do {
		err = dpaa2_switch_pull_fq(fq);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_switch_refill_bp(fq->ethsw);

		store_cleaned = dpaa2_switch_store_consume(fq);
		cleaned += store_cleaned;

		if (cleaned >= budget) {
			work_done = budget;
			goto out;
		}

	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and re-enable
	 * data availability notifications
	 */
	napi_complete_done(napi, cleaned);
	do {
		err = dpaa2_io_service_rearm(NULL, &fq->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_SWITCH_SWP_BUSY_RETRIES);

	work_done = max(cleaned, 1);
out:
	return work_done;
}

static void dpaa2_switch_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_switch_fq *fq;

	fq = container_of(nctx, struct dpaa2_switch_fq, nctx);

	napi_schedule(&fq->napi);
}

static int dpaa2_switch_setup_dpio(struct ethsw_core *ethsw)
{
	struct dpsw_ctrl_if_queue_cfg queue_cfg;
	struct dpaa2_io_notification_ctx *nctx;
	int err, i, j;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++) {
		nctx = &ethsw->fq[i].nctx;

		/* Register a new software context for the FQID.
		 * By using NULL as the first parameter, we specify that we do
		 * not care on which cpu are interrupts received for this queue
		 */
		nctx->id = ethsw->fq[i].fqid;
		nctx->desired_cpu = DPAA2_IO_ANY_CPU;
		nctx->cb = dpaa2_switch_fqdan_cb;
		err = dpaa2_io_service_register(NULL, nctx, ethsw->dev);
		if (err) {
			err = -EPROBE_DEFER;
			goto err_register;
		}

		queue_cfg.options = DPSW_CTRL_IF_QUEUE_OPT_DEST |
				    DPSW_CTRL_IF_QUEUE_OPT_USER_CTX;
		queue_cfg.dest_cfg.dest_type = DPSW_CTRL_IF_DEST_DPIO;
		queue_cfg.dest_cfg.dest_id = nctx->dpio_id;
		queue_cfg.dest_cfg.priority = 0;
		queue_cfg.user_ctx = nctx->qman64;

		err = dpsw_ctrl_if_set_queue(ethsw->mc_io, 0,
					     ethsw->dpsw_handle,
					     ethsw->fq[i].type,
					     &queue_cfg);
		if (err)
			goto err_set_queue;
	}

	return 0;

err_set_queue:
	dpaa2_io_service_deregister(NULL, nctx, ethsw->dev);
err_register:
	for (j = 0; j < i; j++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[j].nctx,
					    ethsw->dev);

	return err;
}

static void dpaa2_switch_free_dpio(struct ethsw_core *ethsw)
{
	int i;

	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		dpaa2_io_service_deregister(NULL, &ethsw->fq[i].nctx,
					    ethsw->dev);
}

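/* Bring-up order for the control interface: frame queues first, then the
 * buffer pool, the dequeue stores and the DPIO notification contexts; only
 * then is the pool seeded and the interface enabled. The error labels here,
 * as well as dpaa2_switch_ctrl_if_teardown(), undo these steps in reverse
 * order.
 */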
static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw)
{
	int err;

	/* setup FQs for Rx and Tx Conf */
	err = dpaa2_switch_setup_fqs(ethsw);
	if (err)
		return err;

	/* setup the buffer pool needed on the Rx path */
	err = dpaa2_switch_setup_dpbp(ethsw);
	if (err)
		return err;

	err = dpaa2_switch_alloc_rings(ethsw);
	if (err)
		goto err_free_dpbp;

	err = dpaa2_switch_setup_dpio(ethsw);
	if (err)
		goto err_destroy_rings;

	err = dpaa2_switch_seed_bp(ethsw);
	if (err)
		goto err_deregister_dpio;

	err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err);
		goto err_drain_dpbp;
	}

	return 0;

err_drain_dpbp:
	dpaa2_switch_drain_bp(ethsw);
err_deregister_dpio:
	dpaa2_switch_free_dpio(ethsw);
err_destroy_rings:
	dpaa2_switch_destroy_rings(ethsw);
err_free_dpbp:
	dpaa2_switch_free_dpbp(ethsw);

	return err;
}

static int dpaa2_switch_init(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	struct dpsw_vlan_if_cfg vcfg = {0};
	struct dpsw_tci_cfg tci_cfg = {0};
	struct dpsw_stp_cfg stp_cfg;
	int err;
	u16 i;

	ethsw->dev_id = sw_dev->obj_desc.id;

	err = dpsw_open(ethsw->mc_io, 0, ethsw->dev_id, &ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_open err %d\n", err);
		return err;
	}

	err = dpsw_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				  &ethsw->sw_attr);
	if (err) {
		dev_err(dev, "dpsw_get_attributes err %d\n", err);
		goto err_close;
	}

	err = dpsw_get_api_version(ethsw->mc_io, 0,
				   &ethsw->major,
				   &ethsw->minor);
	if (err) {
		dev_err(dev, "dpsw_get_api_version err %d\n", err);
		goto err_close;
	}

	/* Minimum supported DPSW version check */
	if (ethsw->major < DPSW_MIN_VER_MAJOR ||
	    (ethsw->major == DPSW_MIN_VER_MAJOR &&
	     ethsw->minor < DPSW_MIN_VER_MINOR)) {
		dev_err(dev, "DPSW version %d:%d not supported. Use firmware 10.28.0 or greater.\n",
			ethsw->major, ethsw->minor);
		err = -EOPNOTSUPP;
		goto err_close;
	}

	if (!dpaa2_switch_supports_cpu_traffic(ethsw)) {
		err = -EOPNOTSUPP;
		goto err_close;
	}

	dpaa2_switch_detect_features(ethsw);

	err = dpsw_reset(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(dev, "dpsw_reset err %d\n", err);
		goto err_close;
	}

	stp_cfg.vlan_id = DEFAULT_VLAN_ID;
	stp_cfg.state = DPSW_STP_STATE_FORWARDING;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpsw_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle, i);
		if (err) {
			dev_err(dev, "dpsw_if_disable err %d\n", err);
			goto err_close;
		}

		err = dpsw_if_set_stp(ethsw->mc_io, 0, ethsw->dpsw_handle, i,
				      &stp_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_stp err %d for port %d\n",
				err, i);
			goto err_close;
		}

		/* Switch starts with all ports configured to VLAN 1. Need to
		 * remove this setting to allow configuration at bridge join
		 */
		vcfg.num_ifs = 1;
		vcfg.if_id[0] = i;
		err = dpsw_vlan_remove_if_untagged(ethsw->mc_io, 0, ethsw->dpsw_handle,
						   DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if_untagged err %d\n",
				err);
			goto err_close;
		}

		tci_cfg.vlan_id = 4095;
		err = dpsw_if_set_tci(ethsw->mc_io, 0, ethsw->dpsw_handle, i, &tci_cfg);
		if (err) {
			dev_err(dev, "dpsw_if_set_tci err %d\n", err);
			goto err_close;
		}

		err = dpsw_vlan_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle,
					  DEFAULT_VLAN_ID, &vcfg);
		if (err) {
			dev_err(dev, "dpsw_vlan_remove_if err %d\n", err);
			goto err_close;
		}
	}

	err = dpsw_vlan_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, DEFAULT_VLAN_ID);
	if (err) {
		dev_err(dev, "dpsw_vlan_remove err %d\n", err);
		goto err_close;
	}

	ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
						   WQ_MEM_RECLAIM, "ethsw",
						   ethsw->sw_attr.id);
	if (!ethsw->workqueue) {
		err = -ENOMEM;
		goto err_close;
	}

	err = dpsw_fdb_remove(ethsw->mc_io, 0, ethsw->dpsw_handle, 0);
	if (err)
		goto err_destroy_ordered_workqueue;

	err = dpaa2_switch_ctrl_if_setup(ethsw);
	if (err)
		goto err_destroy_ordered_workqueue;

	return 0;

err_destroy_ordered_workqueue:
	destroy_workqueue(ethsw->workqueue);

err_close:
	dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	return err;
}

/* Add an ACL to redirect frames with specific destination MAC address to
 * the control interface
 */
static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv,
					   const char *mac)
{
	struct dpaa2_switch_acl_entry acl_entry = {0};

	/* Match on the destination MAC address */
	ether_addr_copy(acl_entry.key.match.l2_dest_mac, mac);
	eth_broadcast_addr(acl_entry.key.mask.l2_dest_mac);

	/* Trap to CPU */
	acl_entry.cfg.precedence = 0;
	acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF;

	return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry);
}

static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port)
{
	const char stpa[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00};
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = DEFAULT_VLAN_ID,
		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
	};
	struct net_device *netdev = port_priv->netdev;
	struct ethsw_core *ethsw = port_priv->ethsw_data;
	struct dpaa2_switch_filter_block *filter_block;
	struct dpsw_fdb_cfg fdb_cfg = {0};
	struct dpsw_if_attr dpsw_if_attr;
	struct dpaa2_switch_fdb *fdb;
	struct dpsw_acl_cfg acl_cfg;
	u16 fdb_id, acl_tbl_id;
	int err;

	/* Get the Tx queue for this specific port */
	err = dpsw_if_get_attributes(ethsw->mc_io, 0, ethsw->dpsw_handle,
				     port_priv->idx, &dpsw_if_attr);
	if (err) {
		netdev_err(netdev, "dpsw_if_get_attributes err %d\n", err);
		return err;
	}
	port_priv->tx_qdid = dpsw_if_attr.qdid;

	/* Create a FDB table for this particular switch port */
	fdb_cfg.num_fdb_entries = ethsw->sw_attr.max_fdb_entries / ethsw->sw_attr.num_ifs;
	err = dpsw_fdb_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &fdb_id, &fdb_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_fdb_add err %d\n", err);
		return err;
	}

	/* Find an unused dpaa2_switch_fdb structure and use it */
	fdb = dpaa2_switch_fdb_get_unused(ethsw);
	fdb->fdb_id = fdb_id;
	fdb->in_use = true;
	fdb->bridge_dev = NULL;
	port_priv->fdb = fdb;

	/* We need to add VLAN 1 as the PVID on this port until it is under a
	 * bridge since the DPAA2 switch is not able to handle the traffic in a
	 * VLAN unaware fashion
	 */
	err = dpaa2_switch_port_vlans_add(netdev, &vlan);
	if (err)
		return err;

	/* Setup the egress flooding domains (broadcast, unknown unicast) */
	err = dpaa2_switch_fdb_set_egress_flood(ethsw, port_priv->fdb->fdb_id);
	if (err)
		return err;

	/* Create an ACL table to be used by this switch port */
	acl_cfg.max_entries = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES;
	err = dpsw_acl_add(ethsw->mc_io, 0, ethsw->dpsw_handle,
			   &acl_tbl_id, &acl_cfg);
	if (err) {
		netdev_err(netdev, "dpsw_acl_add err %d\n", err);
		return err;
	}

	filter_block = dpaa2_switch_filter_block_get_unused(ethsw);
	filter_block->ethsw = ethsw;
	filter_block->acl_id = acl_tbl_id;
	filter_block->in_use = true;
	filter_block->num_acl_rules = 0;
	INIT_LIST_HEAD(&filter_block->acl_entries);
	INIT_LIST_HEAD(&filter_block->mirror_entries);

	err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block);
	if (err)
		return err;

	/* Trap the STP destination MAC address to the control interface */
	err = dpaa2_switch_port_trap_mac_addr(port_priv, stpa);
	if (err)
		return err;

	return err;
}

static void dpaa2_switch_ctrl_if_teardown(struct ethsw_core *ethsw)
{
	dpsw_ctrl_if_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	dpaa2_switch_free_dpio(ethsw);
	dpaa2_switch_destroy_rings(ethsw);
	dpaa2_switch_drain_bp(ethsw);
	dpaa2_switch_free_dpbp(ethsw);
}

static void dpaa2_switch_teardown(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw = dev_get_drvdata(dev);
	int err;

	dpaa2_switch_ctrl_if_teardown(ethsw);

	destroy_workqueue(ethsw->workqueue);

	err = dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err)
		dev_warn(dev, "dpsw_close err %d\n", err);
}

static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev)
{
	struct ethsw_port_priv *port_priv;
	struct ethsw_core *ethsw;
	struct device *dev;
	int i;

	dev = &sw_dev->dev;
	ethsw = dev_get_drvdata(dev);

	dpaa2_switch_teardown_irqs(sw_dev);

	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		port_priv = ethsw->ports[i];
		unregister_netdev(port_priv->netdev);
		dpaa2_switch_port_disconnect_mac(port_priv);
		free_netdev(port_priv->netdev);
	}

	kfree(ethsw->fdbs);
	kfree(ethsw->filter_blocks);
	kfree(ethsw->ports);

	dpaa2_switch_teardown(sw_dev);

	fsl_mc_portal_free(ethsw->mc_io);

	kfree(ethsw);

	dev_set_drvdata(dev, NULL);

	return 0;
}

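/* Per-port probing: each switch interface gets its own net_device, FDB table
 * and ACL/filter block below; the net_device itself is only registered later
 * from dpaa2_switch_probe(), once the whole switch is set up.
 */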
static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
				   u16 port_idx)
{
	struct ethsw_port_priv *port_priv;
	struct device *dev = ethsw->dev;
	struct net_device *port_netdev;
	int err;

	port_netdev = alloc_etherdev(sizeof(struct ethsw_port_priv));
	if (!port_netdev) {
		dev_err(dev, "alloc_etherdev error\n");
		return -ENOMEM;
	}

	port_priv = netdev_priv(port_netdev);
	port_priv->netdev = port_netdev;
	port_priv->ethsw_data = ethsw;

	port_priv->idx = port_idx;
	port_priv->stp_state = BR_STATE_FORWARDING;

	SET_NETDEV_DEV(port_netdev, dev);
	port_netdev->netdev_ops = &dpaa2_switch_port_ops;
	port_netdev->ethtool_ops = &dpaa2_switch_port_ethtool_ops;

	port_netdev->needed_headroom = DPAA2_SWITCH_NEEDED_HEADROOM;

	port_priv->bcast_flood = true;
	port_priv->ucast_flood = true;

	/* Set MTU limits */
	port_netdev->min_mtu = ETH_MIN_MTU;
	port_netdev->max_mtu = ETHSW_MAX_FRAME_LENGTH;

	/* Populate the private port structure so that later calls to
	 * dpaa2_switch_port_init() can use it.
	 */
	ethsw->ports[port_idx] = port_priv;

	/* The DPAA2 switch's ingress path depends on the VLAN table,
	 * thus we are not able to disable VLAN filtering.
	 */
	port_netdev->features = NETIF_F_HW_VLAN_CTAG_FILTER |
				NETIF_F_HW_VLAN_STAG_FILTER |
				NETIF_F_HW_TC;

	err = dpaa2_switch_port_init(port_priv, port_idx);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_mac_addr(port_priv);
	if (err)
		goto err_port_probe;

	err = dpaa2_switch_port_set_learning(port_priv, false);
	if (err)
		goto err_port_probe;
	port_priv->learn_ena = false;

	err = dpaa2_switch_port_connect_mac(port_priv);
	if (err)
		goto err_port_probe;

	return 0;

err_port_probe:
	free_netdev(port_netdev);
	ethsw->ports[port_idx] = NULL;

	return err;
}

static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
{
	struct device *dev = &sw_dev->dev;
	struct ethsw_core *ethsw;
	int i, err;

	/* Allocate switch core */
	ethsw = kzalloc(sizeof(*ethsw), GFP_KERNEL);
	if (!ethsw)
		return -ENOMEM;

	ethsw->dev = dev;
	ethsw->iommu_domain = iommu_get_domain_for_dev(dev);
	dev_set_drvdata(dev, ethsw);

	err = fsl_mc_portal_allocate(sw_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &ethsw->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
		goto err_free_drvdata;
	}

	err = dpaa2_switch_init(sw_dev);
	if (err)
		goto err_free_cmdport;

	ethsw->ports = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->ports),
			       GFP_KERNEL);
	if (!(ethsw->ports)) {
		err = -ENOMEM;
		goto err_teardown;
	}

	ethsw->fdbs = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->fdbs),
			      GFP_KERNEL);
	if (!ethsw->fdbs) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs,
				       sizeof(*ethsw->filter_blocks),
				       GFP_KERNEL);
	if (!ethsw->filter_blocks) {
		err = -ENOMEM;
		goto err_free_fdbs;
	}

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = dpaa2_switch_probe_port(ethsw, i);
		if (err)
			goto err_free_netdev;
	}

	/* Add a NAPI instance for each of the Rx queues. The first port's
	 * net_device will be associated with the instances since we do not have
	 * different queues for each switch port.
	 */
	for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
		netif_napi_add(ethsw->ports[0]->netdev,
			       &ethsw->fq[i].napi, dpaa2_switch_poll,
			       NAPI_POLL_WEIGHT);

	err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle);
	if (err) {
		dev_err(ethsw->dev, "dpsw_enable err %d\n", err);
		goto err_free_netdev;
	}

	/* Setup IRQs */
	err = dpaa2_switch_setup_irqs(sw_dev);
	if (err)
		goto err_stop;

	/* By convention, if the mirror port is equal to the number of switch
	 * interfaces, then mirroring of any kind is disabled.
	 */
	ethsw->mirror_port = ethsw->sw_attr.num_ifs;

	/* Register the netdev only when the entire setup is done and the
	 * switch port interfaces are ready to receive traffic
	 */
	for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
		err = register_netdev(ethsw->ports[i]->netdev);
		if (err < 0) {
			dev_err(dev, "register_netdev error %d\n", err);
			goto err_unregister_ports;
		}
	}

	return 0;

err_unregister_ports:
	for (i--; i >= 0; i--)
		unregister_netdev(ethsw->ports[i]->netdev);
	dpaa2_switch_teardown_irqs(sw_dev);
err_stop:
	dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
err_free_netdev:
	for (i--; i >= 0; i--)
		free_netdev(ethsw->ports[i]->netdev);
	kfree(ethsw->filter_blocks);
err_free_fdbs:
	kfree(ethsw->fdbs);
err_free_ports:
	kfree(ethsw->ports);
err_teardown:
	dpaa2_switch_teardown(sw_dev);
err_free_cmdport:
	fsl_mc_portal_free(ethsw->mc_io);
err_free_drvdata:
	kfree(ethsw);
	dev_set_drvdata(dev, NULL);

	return err;
}

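/* Illustrative bring-up from user space (interface names are examples only):
 * the switch ports are regular net_devices that offload bridging once they
 * are enslaved to a VLAN-aware bridge, e.g.:
 *
 *   ip link add name br0 type bridge vlan_filtering 1
 *   ip link set dev ethsw-p0 master br0
 *   ip link set dev ethsw-p0 up
 */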
static const struct fsl_mc_device_id dpaa2_switch_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpsw",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_switch_match_id_table);

static struct fsl_mc_driver dpaa2_switch_drv = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_switch_probe,
	.remove = dpaa2_switch_remove,
	.match_id_table = dpaa2_switch_match_id_table
};

static struct notifier_block dpaa2_switch_port_nb __read_mostly = {
	.notifier_call = dpaa2_switch_port_netdevice_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_nb = {
	.notifier_call = dpaa2_switch_port_event,
};

static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb = {
	.notifier_call = dpaa2_switch_port_blocking_event,
};

static int dpaa2_switch_register_notifiers(void)
{
	int err;

	err = register_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register net_device notifier (%d)\n", err);
		return err;
	}

	err = register_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev notifier (%d)\n", err);
		goto err_switchdev_nb;
	}

	err = register_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err) {
		pr_err("dpaa2-switch: failed to register switchdev blocking notifier (%d)\n", err);
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&dpaa2_switch_port_nb);

	return err;
}

static void dpaa2_switch_unregister_notifiers(void)
{
	int err;

	err = unregister_switchdev_blocking_notifier(&dpaa2_switch_port_switchdev_blocking_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev blocking notifier (%d)\n",
		       err);

	err = unregister_switchdev_notifier(&dpaa2_switch_port_switchdev_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister switchdev notifier (%d)\n", err);

	err = unregister_netdevice_notifier(&dpaa2_switch_port_nb);
	if (err)
		pr_err("dpaa2-switch: failed to unregister net_device notifier (%d)\n", err);
}

static int __init dpaa2_switch_driver_init(void)
{
	int err;

	err = fsl_mc_driver_register(&dpaa2_switch_drv);
	if (err)
		return err;

	err = dpaa2_switch_register_notifiers();
	if (err) {
		fsl_mc_driver_unregister(&dpaa2_switch_drv);
		return err;
	}

	return 0;
}

static void __exit dpaa2_switch_driver_exit(void)
{
	dpaa2_switch_unregister_notifiers();
	fsl_mc_driver_unregister(&dpaa2_switch_drv);
}

module_init(dpaa2_switch_driver_init);
module_exit(dpaa2_switch_driver_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DPAA2 Ethernet Switch Driver");