/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/qed/qede_roce.h>
#include "qede.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
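/* "debug" supplies the driver-wide default debug msglevel bitmask, typically
 * given at load time (e.g. "modprobe qede debug=<mask>"); QED_LOG_INFO_MASK,
 * tested in qede_cleanup() below, is one of the bits it may carry.
 */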

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT		(5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);

#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	int rc;

	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
		struct qed_update_vport_params params;

		memset(&params, 0, sizeof(params));
		params.update_tx_switching_flg = 1;
		params.tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, &params);
	}

	return rc;
}
#endif

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
};

static void qede_force_mac(void *dev, u8 *mac, bool forced)
{
	struct qede_dev *edev = dev;

	/* MAC hints take effect only if we haven't set one already */
	if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
		return;

	ether_addr_copy(edev->ndev->dev_addr, mac);
	ether_addr_copy(edev->primary_mac, mac);
}

static struct qed_eth_cb_ops qede_ll_ops = {
	{
		.link_update = qede_link_update,
	},
	.force_mac = qede_force_mac,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_roce_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

/* -------------------------------------------------------------------------
 * START OF FAST-PATH
 * -------------------------------------------------------------------------
 */

/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
			    struct qede_tx_queue *txq, int *len)
{
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
				    struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(&edev->pdev->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;
}

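/* Classify an skb for transmission: returns a mask of XMIT_* flags (plain,
 * L4 checksum, LSO, tunnel encapsulation) and reports IPv6 packets carrying
 * extension headers through *ipv6_ext.
 */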
static u32 qede_xmit_type(struct qede_dev *edev,
			  struct sk_buff *skb, int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation) {
		rc |= XMIT_ENC;
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;

			rc |= XMIT_LSO;
			return rc;
		}
	}

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

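/* DMA-map a single skb fragment and record the mapping and length in the
 * given Tx BD; returns a negative value if the mapping fails.
 */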
static int map_frag_to_bd(struct qede_dev *edev,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
		return -ENOMEM;
	}

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

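/* Header length (up to and including the [inner] TCP header), measured from
 * the start of the skb's linear data.
 */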
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);

	return (skb_transport_header(skb) +
		tcp_hdrlen(skb) - skb->data);
}

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
			     u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

/* Main transmit function */
static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
	txq = QEDE_TX_QUEUE(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);

#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
	if (qede_pkt_req_lin(edev, skb, xmit_type)) {
		if (skb_linearize(skb)) {
			DP_NOTICE(edev,
				  "SKB linearization failed - silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		   qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
		qede_update_tx_producer(txq);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		if (xmit_type & XMIT_ENC) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			first_bd->data.bitfields |=
				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
		}

		/* Legacy FW had flipped behavior in regard to this bit -
		 * I.e., needed to set to prevent FW from touching encapsulated
		 * packets when it didn't need to.
		 */
		if (unlikely(txq->is_legacy))
			first_bd->data.bitfields ^=
				1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		if (unlikely(xmit_type & XMIT_ENC)) {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;

			if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
				u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;

				first_bd->data.bd_flags.bitfields |= 1 << tmp;
			}
			hlen = qede_get_skb_hlen(skb, true);
		} else {
			first_bd->data.bd_flags.bitfields |=
				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
			hlen = qede_get_skb_hlen(skb, false);
		}

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
					   hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	} else {
		first_bd->data.bitfields |=
			(skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
	}

	/* Handle fragmented skb */
	/* special handle for frags inside 2nd and 3rd bds.. */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* map last frags into 4th, 5th .... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			     qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			qede_update_tx_producer(txq);
			return NETDEV_TX_OK;
		}
	}

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance packet producer only before sending the packet since mapping
	 * of pages may fail.
	 */
	txq->sw_tx_prod++;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
		qede_update_tx_producer(txq);

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		      < (MAX_SKB_FRAGS + 1))) {
		if (skb->xmit_more)
			qede_update_tx_producer(txq);

		netif_tx_stop_queue(netdev_txq);
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if (qed_chain_get_elem_left(&txq->tx_pbl)
		     >= (MAX_SKB_FRAGS + 1) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

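/* True when firmware has reported Tx completions that this queue has not yet
 * processed.
 */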
int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

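/* True when the hardware Rx completion-queue producer has advanced past the
 * driver's consumer, i.e. there are completions to service.
 */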
bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static bool qede_has_tx_work(struct qede_fastpath *fp)
{
	u8 tc;

	for (tc = 0; tc < fp->edev->num_tc; tc++)
		if (qede_txq_has_work(&fp->txqs[tc]))
			return true;

	return false;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

/* This function reuses the buffer(from an offset) from
 * consumer index to producer index in the bd ring
 */
static inline void qede_reuse_page(struct qede_dev *edev,
				   struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* In case of allocation failures reuse buffers
 * from consumer index to produce buffers for firmware
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
			     struct qede_dev *edev, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(edev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

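/* Advance a consumed Rx buffer to the next segment of its page; once the page
 * is exhausted, post a freshly allocated buffer instead, otherwise re-post the
 * same page (taking an extra reference) to the Rx BD ring.
 */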
static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
					 struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	} else {
		/* Increment refcount of the page as we don't want
		 * network stack to take the ownership of the page
		 * which can be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(edev, rxq, curr_cons);
	}

	return 0;
}

static inline void qede_update_rx_prod(struct qede_dev *edev,
				       struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

static u32 qede_get_rxhash(struct qede_dev *edev,
			   u8 bitfields,
			   __le32 rss_hash, enum pkt_hash_types *rxhash_type)
{
	enum rss_hash_type htype;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

	if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
		*rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
				(htype == RSS_HASH_TYPE_IPV6)) ?
				PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		return le32_to_cpu(rss_hash);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

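/* Translate the QEDE_*CSUM* flags derived from the CQE parsing flags into the
 * skb checksum fields consumed by the stack.
 */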
static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
		skb->csum_level = 1;
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

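/* Seed gso_type/gso_size on an aggregated skb from the TPA-start CQE so the
 * stack can re-segment it when the packet is forwarded.
 */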
static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	    PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}

static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data, current_bd->page_offset,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, edev, 1);
	return -ENOMEM;
}

1091 static void qede_tpa_start(struct qede_dev
*edev
,
1092 struct qede_rx_queue
*rxq
,
1093 struct eth_fast_path_rx_tpa_start_cqe
*cqe
)
1095 struct qede_agg_info
*tpa_info
= &rxq
->tpa_info
[cqe
->tpa_agg_index
];
1096 struct eth_rx_bd
*rx_bd_cons
= qed_chain_consume(&rxq
->rx_bd_ring
);
1097 struct eth_rx_bd
*rx_bd_prod
= qed_chain_produce(&rxq
->rx_bd_ring
);
1098 struct sw_rx_data
*replace_buf
= &tpa_info
->replace_buf
;
1099 dma_addr_t mapping
= tpa_info
->replace_buf_mapping
;
1100 struct sw_rx_data
*sw_rx_data_cons
;
1101 struct sw_rx_data
*sw_rx_data_prod
;
1102 enum pkt_hash_types rxhash_type
;
1105 sw_rx_data_cons
= &rxq
->sw_rx_ring
[rxq
->sw_rx_cons
& NUM_RX_BDS_MAX
];
1106 sw_rx_data_prod
= &rxq
->sw_rx_ring
[rxq
->sw_rx_prod
& NUM_RX_BDS_MAX
];
1108 /* Use pre-allocated replacement buffer - we can't release the agg.
1109 * start until its over and we don't want to risk allocation failing
1110 * here, so re-allocate when aggregation will be over.
1112 sw_rx_data_prod
->mapping
= replace_buf
->mapping
;
1114 sw_rx_data_prod
->data
= replace_buf
->data
;
1115 rx_bd_prod
->addr
.hi
= cpu_to_le32(upper_32_bits(mapping
));
1116 rx_bd_prod
->addr
.lo
= cpu_to_le32(lower_32_bits(mapping
));
1117 sw_rx_data_prod
->page_offset
= replace_buf
->page_offset
;
1121 /* move partial skb from cons to pool (don't unmap yet)
1122 * save mapping, incase we drop the packet later on.
1124 tpa_info
->start_buf
= *sw_rx_data_cons
;
1125 mapping
= HILO_U64(le32_to_cpu(rx_bd_cons
->addr
.hi
),
1126 le32_to_cpu(rx_bd_cons
->addr
.lo
));
1128 tpa_info
->start_buf_mapping
= mapping
;
1131 /* set tpa state to start only if we are able to allocate skb
1132 * for this aggregation, otherwise mark as error and aggregation will
1135 tpa_info
->skb
= netdev_alloc_skb(edev
->ndev
,
1136 le16_to_cpu(cqe
->len_on_first_bd
));
1137 if (unlikely(!tpa_info
->skb
)) {
1138 DP_NOTICE(edev
, "Failed to allocate SKB for gro\n");
1139 tpa_info
->agg_state
= QEDE_AGG_STATE_ERROR
;
1143 skb_put(tpa_info
->skb
, le16_to_cpu(cqe
->len_on_first_bd
));
1144 memcpy(&tpa_info
->start_cqe
, cqe
, sizeof(tpa_info
->start_cqe
));
1146 /* Start filling in the aggregation info */
1147 tpa_info
->frag_id
= 0;
1148 tpa_info
->agg_state
= QEDE_AGG_STATE_START
;
1150 rxhash
= qede_get_rxhash(edev
, cqe
->bitfields
,
1151 cqe
->rss_hash
, &rxhash_type
);
1152 skb_set_hash(tpa_info
->skb
, rxhash
, rxhash_type
);
1153 if ((le16_to_cpu(cqe
->pars_flags
.flags
) >>
1154 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT
) &
1155 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK
)
1156 tpa_info
->vlan_tag
= le16_to_cpu(cqe
->vlan_tag
);
1158 tpa_info
->vlan_tag
= 0;
1160 /* This is needed in order to enable forwarding support */
1161 qede_set_gro_params(edev
, tpa_info
->skb
, cqe
);
1163 cons_buf
: /* We still need to handle bd_len_list to consume buffers */
1164 if (likely(cqe
->ext_bd_len_list
[0]))
1165 qede_fill_frag_skb(edev
, rxq
, cqe
->tpa_agg_index
,
1166 le16_to_cpu(cqe
->ext_bd_len_list
[0]));
1168 if (unlikely(cqe
->ext_bd_len_list
[1])) {
1170 "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
1171 tpa_info
->agg_state
= QEDE_AGG_STATE_ERROR
;
1176 static void qede_gro_ip_csum(struct sk_buff
*skb
)
1178 const struct iphdr
*iph
= ip_hdr(skb
);
1181 skb_set_transport_header(skb
, sizeof(struct iphdr
));
1184 th
->check
= ~tcp_v4_check(skb
->len
- skb_transport_offset(skb
),
1185 iph
->saddr
, iph
->daddr
, 0);
1187 tcp_gro_complete(skb
);
1190 static void qede_gro_ipv6_csum(struct sk_buff
*skb
)
1192 struct ipv6hdr
*iph
= ipv6_hdr(skb
);
1195 skb_set_transport_header(skb
, sizeof(struct ipv6hdr
));
1198 th
->check
= ~tcp_v6_check(skb
->len
- skb_transport_offset(skb
),
1199 &iph
->saddr
, &iph
->daddr
, 0);
1200 tcp_gro_complete(skb
);
1204 static void qede_gro_receive(struct qede_dev
*edev
,
1205 struct qede_fastpath
*fp
,
1206 struct sk_buff
*skb
,
1209 /* FW can send a single MTU sized packet from gro flow
1210 * due to aggregation timeout/last segment etc. which
1211 * is not expected to be a gro packet. If a skb has zero
1212 * frags then simply push it in the stack as non gso skb.
1214 if (unlikely(!skb
->data_len
)) {
1215 skb_shinfo(skb
)->gso_type
= 0;
1216 skb_shinfo(skb
)->gso_size
= 0;
1221 if (skb_shinfo(skb
)->gso_size
) {
1222 skb_set_network_header(skb
, 0);
1224 switch (skb
->protocol
) {
1225 case htons(ETH_P_IP
):
1226 qede_gro_ip_csum(skb
);
1228 case htons(ETH_P_IPV6
):
1229 qede_gro_ipv6_csum(skb
);
1233 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
1234 ntohs(skb
->protocol
));
1240 skb_record_rx_queue(skb
, fp
->rxq
->rxq_id
);
1241 qede_skb_receive(edev
, fp
, skb
, vlan_tag
);
1244 static inline void qede_tpa_cont(struct qede_dev
*edev
,
1245 struct qede_rx_queue
*rxq
,
1246 struct eth_fast_path_rx_tpa_cont_cqe
*cqe
)
1250 for (i
= 0; cqe
->len_list
[i
]; i
++)
1251 qede_fill_frag_skb(edev
, rxq
, cqe
->tpa_agg_index
,
1252 le16_to_cpu(cqe
->len_list
[i
]));
1254 if (unlikely(i
> 1))
1256 "Strange - TPA cont with more than a single len_list entry\n");
1259 static void qede_tpa_end(struct qede_dev
*edev
,
1260 struct qede_fastpath
*fp
,
1261 struct eth_fast_path_rx_tpa_end_cqe
*cqe
)
1263 struct qede_rx_queue
*rxq
= fp
->rxq
;
1264 struct qede_agg_info
*tpa_info
;
1265 struct sk_buff
*skb
;
1268 tpa_info
= &rxq
->tpa_info
[cqe
->tpa_agg_index
];
1269 skb
= tpa_info
->skb
;
1271 for (i
= 0; cqe
->len_list
[i
]; i
++)
1272 qede_fill_frag_skb(edev
, rxq
, cqe
->tpa_agg_index
,
1273 le16_to_cpu(cqe
->len_list
[i
]));
1274 if (unlikely(i
> 1))
1276 "Strange - TPA emd with more than a single len_list entry\n");
1278 if (unlikely(tpa_info
->agg_state
!= QEDE_AGG_STATE_START
))
1282 if (unlikely(cqe
->num_of_bds
!= tpa_info
->frag_id
+ 1))
1284 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
1285 cqe
->num_of_bds
, tpa_info
->frag_id
);
1286 if (unlikely(skb
->len
!= le16_to_cpu(cqe
->total_packet_len
)))
1288 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
1289 le16_to_cpu(cqe
->total_packet_len
), skb
->len
);
1292 page_address(tpa_info
->start_buf
.data
) +
1293 tpa_info
->start_cqe
.placement_offset
+
1294 tpa_info
->start_buf
.page_offset
,
1295 le16_to_cpu(tpa_info
->start_cqe
.len_on_first_bd
));
1297 /* Recycle [mapped] start buffer for the next replacement */
1298 tpa_info
->replace_buf
= tpa_info
->start_buf
;
1299 tpa_info
->replace_buf_mapping
= tpa_info
->start_buf_mapping
;
1301 /* Finalize the SKB */
1302 skb
->protocol
= eth_type_trans(skb
, edev
->ndev
);
1303 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1305 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
1306 * to skb_shinfo(skb)->gso_segs
1308 NAPI_GRO_CB(skb
)->count
= le16_to_cpu(cqe
->num_of_coalesced_segs
);
1310 qede_gro_receive(edev
, fp
, skb
, tpa_info
->vlan_tag
);
1312 tpa_info
->agg_state
= QEDE_AGG_STATE_NONE
;
1316 /* The BD starting the aggregation is still mapped; Re-use it for
1317 * future aggregations [as replacement buffer]
1319 memcpy(&tpa_info
->replace_buf
, &tpa_info
->start_buf
,
1320 sizeof(struct sw_rx_data
));
1321 tpa_info
->replace_buf_mapping
= tpa_info
->start_buf_mapping
;
1322 tpa_info
->start_buf
.data
= NULL
;
1323 tpa_info
->agg_state
= QEDE_AGG_STATE_NONE
;
1324 dev_kfree_skb_any(tpa_info
->skb
);
1325 tpa_info
->skb
= NULL
;
1328 static bool qede_tunn_exist(u16 flag
)
1330 return !!(flag
& (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK
<<
1331 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT
));
1334 static u8
qede_check_tunn_csum(u16 flag
)
1339 if (flag
& (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK
<<
1340 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT
))
1341 csum_flag
|= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK
<<
1342 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT
;
1344 if (flag
& (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK
<<
1345 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT
)) {
1346 csum_flag
|= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK
<<
1347 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT
;
1348 tcsum
= QEDE_TUNN_CSUM_UNNECESSARY
;
1351 csum_flag
|= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK
<<
1352 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT
|
1353 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK
<<
1354 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT
;
1356 if (csum_flag
& flag
)
1357 return QEDE_CSUM_ERROR
;
1359 return QEDE_CSUM_UNNECESSARY
| tcsum
;
1362 static u8
qede_check_notunn_csum(u16 flag
)
1367 if (flag
& (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK
<<
1368 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT
)) {
1369 csum_flag
|= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK
<<
1370 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT
;
1371 csum
= QEDE_CSUM_UNNECESSARY
;
1374 csum_flag
|= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK
<<
1375 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT
;
1377 if (csum_flag
& flag
)
1378 return QEDE_CSUM_ERROR
;
1383 static u8
qede_check_csum(u16 flag
)
1385 if (!qede_tunn_exist(flag
))
1386 return qede_check_notunn_csum(flag
);
1388 return qede_check_tunn_csum(flag
);
1391 static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe
*cqe
,
1394 u8 tun_pars_flg
= cqe
->tunnel_pars_flags
.flags
;
1396 if ((tun_pars_flg
& (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK
<<
1397 ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT
)) ||
1398 (flag
& (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK
<<
1399 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT
)))
1405 static int qede_rx_int(struct qede_fastpath
*fp
, int budget
)
1407 struct qede_dev
*edev
= fp
->edev
;
1408 struct qede_rx_queue
*rxq
= fp
->rxq
;
1410 u16 hw_comp_cons
, sw_comp_cons
, sw_rx_index
, parse_flag
;
1414 hw_comp_cons
= le16_to_cpu(*rxq
->hw_cons_ptr
);
1415 sw_comp_cons
= qed_chain_get_cons_idx(&rxq
->rx_comp_ring
);
1417 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
1418 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
1419 * read before it is written by FW, then FW writes CQE and SB, and then
1420 * the CPU reads the hw_comp_cons, it will use an old CQE.
1424 /* Loop to complete all indicated BDs */
1425 while (sw_comp_cons
!= hw_comp_cons
) {
1426 struct eth_fast_path_rx_reg_cqe
*fp_cqe
;
1427 enum pkt_hash_types rxhash_type
;
1428 enum eth_rx_cqe_type cqe_type
;
1429 struct sw_rx_data
*sw_rx_data
;
1430 union eth_rx_cqe
*cqe
;
1431 struct sk_buff
*skb
;
1437 /* Get the CQE from the completion ring */
1438 cqe
= (union eth_rx_cqe
*)
1439 qed_chain_consume(&rxq
->rx_comp_ring
);
1440 cqe_type
= cqe
->fast_path_regular
.type
;
1442 if (unlikely(cqe_type
== ETH_RX_CQE_TYPE_SLOW_PATH
)) {
1443 edev
->ops
->eth_cqe_completion(
1445 (struct eth_slow_path_rx_cqe
*)cqe
);
1449 if (cqe_type
!= ETH_RX_CQE_TYPE_REGULAR
) {
1451 case ETH_RX_CQE_TYPE_TPA_START
:
1452 qede_tpa_start(edev
, rxq
,
1453 &cqe
->fast_path_tpa_start
);
1455 case ETH_RX_CQE_TYPE_TPA_CONT
:
1456 qede_tpa_cont(edev
, rxq
,
1457 &cqe
->fast_path_tpa_cont
);
1459 case ETH_RX_CQE_TYPE_TPA_END
:
1460 qede_tpa_end(edev
, fp
,
1461 &cqe
->fast_path_tpa_end
);
1468 /* Get the data from the SW ring */
1469 sw_rx_index
= rxq
->sw_rx_cons
& NUM_RX_BDS_MAX
;
1470 sw_rx_data
= &rxq
->sw_rx_ring
[sw_rx_index
];
1471 data
= sw_rx_data
->data
;
1473 fp_cqe
= &cqe
->fast_path_regular
;
1474 len
= le16_to_cpu(fp_cqe
->len_on_first_bd
);
1475 pad
= fp_cqe
->placement_offset
;
1476 flags
= cqe
->fast_path_regular
.pars_flags
.flags
;
1478 /* If this is an error packet then drop it */
1479 parse_flag
= le16_to_cpu(flags
);
1481 csum_flag
= qede_check_csum(parse_flag
);
1482 if (unlikely(csum_flag
== QEDE_CSUM_ERROR
)) {
1483 if (qede_pkt_is_ip_fragmented(&cqe
->fast_path_regular
,
1490 "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
1491 sw_comp_cons
, parse_flag
);
1492 rxq
->rx_hw_errors
++;
1493 qede_recycle_rx_bd_ring(rxq
, edev
, fp_cqe
->bd_num
);
1498 skb
= netdev_alloc_skb(edev
->ndev
, QEDE_RX_HDR_SIZE
);
1499 if (unlikely(!skb
)) {
1501 "skb allocation failed, dropping incoming packet\n");
1502 qede_recycle_rx_bd_ring(rxq
, edev
, fp_cqe
->bd_num
);
1503 rxq
->rx_alloc_errors
++;
1507 /* Copy data into SKB */
1508 if (len
+ pad
<= edev
->rx_copybreak
) {
1509 memcpy(skb_put(skb
, len
),
1510 page_address(data
) + pad
+
1511 sw_rx_data
->page_offset
, len
);
1512 qede_reuse_page(edev
, rxq
, sw_rx_data
);
1514 struct skb_frag_struct
*frag
;
1515 unsigned int pull_len
;
1518 frag
= &skb_shinfo(skb
)->frags
[0];
1520 skb_add_rx_frag(skb
, skb_shinfo(skb
)->nr_frags
, data
,
1521 pad
+ sw_rx_data
->page_offset
,
1522 len
, rxq
->rx_buf_seg_size
);
1524 va
= skb_frag_address(frag
);
1525 pull_len
= eth_get_headlen(va
, QEDE_RX_HDR_SIZE
);
1527 /* Align the pull_len to optimize memcpy */
1528 memcpy(skb
->data
, va
, ALIGN(pull_len
, sizeof(long)));
1530 skb_frag_size_sub(frag
, pull_len
);
1531 frag
->page_offset
+= pull_len
;
1532 skb
->data_len
-= pull_len
;
1533 skb
->tail
+= pull_len
;
1535 if (unlikely(qede_realloc_rx_buffer(edev
, rxq
,
1537 DP_ERR(edev
, "Failed to allocate rx buffer\n");
1538 /* Incr page ref count to reuse on allocation
1539 * failure so that it doesn't get freed while
1543 page_ref_inc(sw_rx_data
->data
);
1544 rxq
->rx_alloc_errors
++;
1545 qede_recycle_rx_bd_ring(rxq
, edev
,
1547 dev_kfree_skb_any(skb
);
1552 qede_rx_bd_ring_consume(rxq
);
1554 if (fp_cqe
->bd_num
!= 1) {
1555 u16 pkt_len
= le16_to_cpu(fp_cqe
->pkt_len
);
1560 for (num_frags
= fp_cqe
->bd_num
- 1; num_frags
> 0;
1562 u16 cur_size
= pkt_len
> rxq
->rx_buf_size
?
1563 rxq
->rx_buf_size
: pkt_len
;
1564 if (unlikely(!cur_size
)) {
1566 "Still got %d BDs for mapping jumbo, but length became 0\n",
1568 qede_recycle_rx_bd_ring(rxq
, edev
,
1570 dev_kfree_skb_any(skb
);
1574 if (unlikely(qede_alloc_rx_buffer(edev
, rxq
))) {
1575 qede_recycle_rx_bd_ring(rxq
, edev
,
1577 dev_kfree_skb_any(skb
);
1581 sw_rx_index
= rxq
->sw_rx_cons
& NUM_RX_BDS_MAX
;
1582 sw_rx_data
= &rxq
->sw_rx_ring
[sw_rx_index
];
1583 qede_rx_bd_ring_consume(rxq
);
1585 dma_unmap_page(&edev
->pdev
->dev
,
1586 sw_rx_data
->mapping
,
1587 PAGE_SIZE
, DMA_FROM_DEVICE
);
1589 skb_fill_page_desc(skb
,
1590 skb_shinfo(skb
)->nr_frags
++,
1591 sw_rx_data
->data
, 0,
1594 skb
->truesize
+= PAGE_SIZE
;
1595 skb
->data_len
+= cur_size
;
1596 skb
->len
+= cur_size
;
1597 pkt_len
-= cur_size
;
1600 if (unlikely(pkt_len
))
1602 "Mapped all BDs of jumbo, but still have %d bytes\n",
1606 skb
->protocol
= eth_type_trans(skb
, edev
->ndev
);
1608 rx_hash
= qede_get_rxhash(edev
, fp_cqe
->bitfields
,
1609 fp_cqe
->rss_hash
, &rxhash_type
);
1611 skb_set_hash(skb
, rx_hash
, rxhash_type
);
1613 qede_set_skb_csum(skb
, csum_flag
);
1615 skb_record_rx_queue(skb
, fp
->rxq
->rxq_id
);
1617 qede_skb_receive(edev
, fp
, skb
, le16_to_cpu(fp_cqe
->vlan_tag
));
1621 next_cqe
: /* don't consume bd rx buffer */
1622 qed_chain_recycle_consumed(&rxq
->rx_comp_ring
);
1623 sw_comp_cons
= qed_chain_get_cons_idx(&rxq
->rx_comp_ring
);
1624 /* CR TPA - revisit how to handle budget in TPA perhaps
1627 if (rx_pkt
== budget
)
1629 } /* repeat while sw_comp_cons != hw_comp_cons... */
1631 /* Update producers */
1632 qede_update_rx_prod(edev
, rxq
);
1634 rxq
->rcv_pkts
+= rx_pkt
;
1639 static int qede_poll(struct napi_struct
*napi
, int budget
)
1641 struct qede_fastpath
*fp
= container_of(napi
, struct qede_fastpath
,
1643 struct qede_dev
*edev
= fp
->edev
;
1644 int rx_work_done
= 0;
1647 for (tc
= 0; tc
< edev
->num_tc
; tc
++)
1648 if (likely(fp
->type
& QEDE_FASTPATH_TX
) &&
1649 qede_txq_has_work(&fp
->txqs
[tc
]))
1650 qede_tx_int(edev
, &fp
->txqs
[tc
]);
1652 rx_work_done
= (likely(fp
->type
& QEDE_FASTPATH_RX
) &&
1653 qede_has_rx_work(fp
->rxq
)) ?
1654 qede_rx_int(fp
, budget
) : 0;
1655 if (rx_work_done
< budget
) {
1656 qed_sb_update_sb_idx(fp
->sb_info
);
1657 /* *_has_*_work() reads the status block,
1658 * thus we need to ensure that status block indices
1659 * have been actually read (qed_sb_update_sb_idx)
1660 * prior to this check (*_has_*_work) so that
1661 * we won't write the "newer" value of the status block
1662 * to HW (if there was a DMA right after
1663 * qede_has_rx_work and if there is no rmb, the memory
1664 * reading (qed_sb_update_sb_idx) may be postponed
1665 * to right before *_ack_sb). In this case there
1666 * will never be another interrupt until there is
1667 * another update of the status block, while there
1668 * is still unhandled work.
1672 /* Fall out from the NAPI loop if needed */
1673 if (!((likely(fp
->type
& QEDE_FASTPATH_RX
) &&
1674 qede_has_rx_work(fp
->rxq
)) ||
1675 (likely(fp
->type
& QEDE_FASTPATH_TX
) &&
1676 qede_has_tx_work(fp
)))) {
1677 napi_complete(napi
);
1679 /* Update and reenable interrupts */
1680 qed_sb_ack(fp
->sb_info
, IGU_INT_ENABLE
,
1683 rx_work_done
= budget
;
1687 return rx_work_done
;
1690 static irqreturn_t
qede_msix_fp_int(int irq
, void *fp_cookie
)
1692 struct qede_fastpath
*fp
= fp_cookie
;
1694 qed_sb_ack(fp
->sb_info
, IGU_INT_DISABLE
, 0 /*do not update*/);
1696 napi_schedule_irqoff(&fp
->napi
);
1700 /* -------------------------------------------------------------------------
1702 * -------------------------------------------------------------------------
1705 static int qede_open(struct net_device
*ndev
);
1706 static int qede_close(struct net_device
*ndev
);
1707 static int qede_set_mac_addr(struct net_device
*ndev
, void *p
);
1708 static void qede_set_rx_mode(struct net_device
*ndev
);
1709 static void qede_config_rx_mode(struct net_device
*ndev
);
1711 static int qede_set_ucast_rx_mac(struct qede_dev
*edev
,
1712 enum qed_filter_xcast_params_type opcode
,
1713 unsigned char mac
[ETH_ALEN
])
1715 struct qed_filter_params filter_cmd
;
1717 memset(&filter_cmd
, 0, sizeof(filter_cmd
));
1718 filter_cmd
.type
= QED_FILTER_TYPE_UCAST
;
1719 filter_cmd
.filter
.ucast
.type
= opcode
;
1720 filter_cmd
.filter
.ucast
.mac_valid
= 1;
1721 ether_addr_copy(filter_cmd
.filter
.ucast
.mac
, mac
);
1723 return edev
->ops
->filter_config(edev
->cdev
, &filter_cmd
);
1726 static int qede_set_ucast_rx_vlan(struct qede_dev
*edev
,
1727 enum qed_filter_xcast_params_type opcode
,
1730 struct qed_filter_params filter_cmd
;
1732 memset(&filter_cmd
, 0, sizeof(filter_cmd
));
1733 filter_cmd
.type
= QED_FILTER_TYPE_UCAST
;
1734 filter_cmd
.filter
.ucast
.type
= opcode
;
1735 filter_cmd
.filter
.ucast
.vlan_valid
= 1;
1736 filter_cmd
.filter
.ucast
.vlan
= vid
;
1738 return edev
->ops
->filter_config(edev
->cdev
, &filter_cmd
);
1741 void qede_fill_by_demand_stats(struct qede_dev
*edev
)
1743 struct qed_eth_stats stats
;
1745 edev
->ops
->get_vport_stats(edev
->cdev
, &stats
);
1746 edev
->stats
.no_buff_discards
= stats
.no_buff_discards
;
1747 edev
->stats
.packet_too_big_discard
= stats
.packet_too_big_discard
;
1748 edev
->stats
.ttl0_discard
= stats
.ttl0_discard
;
1749 edev
->stats
.rx_ucast_bytes
= stats
.rx_ucast_bytes
;
1750 edev
->stats
.rx_mcast_bytes
= stats
.rx_mcast_bytes
;
1751 edev
->stats
.rx_bcast_bytes
= stats
.rx_bcast_bytes
;
1752 edev
->stats
.rx_ucast_pkts
= stats
.rx_ucast_pkts
;
1753 edev
->stats
.rx_mcast_pkts
= stats
.rx_mcast_pkts
;
1754 edev
->stats
.rx_bcast_pkts
= stats
.rx_bcast_pkts
;
1755 edev
->stats
.mftag_filter_discards
= stats
.mftag_filter_discards
;
1756 edev
->stats
.mac_filter_discards
= stats
.mac_filter_discards
;
1758 edev
->stats
.tx_ucast_bytes
= stats
.tx_ucast_bytes
;
1759 edev
->stats
.tx_mcast_bytes
= stats
.tx_mcast_bytes
;
1760 edev
->stats
.tx_bcast_bytes
= stats
.tx_bcast_bytes
;
1761 edev
->stats
.tx_ucast_pkts
= stats
.tx_ucast_pkts
;
1762 edev
->stats
.tx_mcast_pkts
= stats
.tx_mcast_pkts
;
1763 edev
->stats
.tx_bcast_pkts
= stats
.tx_bcast_pkts
;
1764 edev
->stats
.tx_err_drop_pkts
= stats
.tx_err_drop_pkts
;
1765 edev
->stats
.coalesced_pkts
= stats
.tpa_coalesced_pkts
;
1766 edev
->stats
.coalesced_events
= stats
.tpa_coalesced_events
;
1767 edev
->stats
.coalesced_aborts_num
= stats
.tpa_aborts_num
;
1768 edev
->stats
.non_coalesced_pkts
= stats
.tpa_not_coalesced_pkts
;
1769 edev
->stats
.coalesced_bytes
= stats
.tpa_coalesced_bytes
;
1771 edev
->stats
.rx_64_byte_packets
= stats
.rx_64_byte_packets
;
1772 edev
->stats
.rx_65_to_127_byte_packets
= stats
.rx_65_to_127_byte_packets
;
1773 edev
->stats
.rx_128_to_255_byte_packets
=
1774 stats
.rx_128_to_255_byte_packets
;
1775 edev
->stats
.rx_256_to_511_byte_packets
=
1776 stats
.rx_256_to_511_byte_packets
;
1777 edev
->stats
.rx_512_to_1023_byte_packets
=
1778 stats
.rx_512_to_1023_byte_packets
;
1779 edev
->stats
.rx_1024_to_1518_byte_packets
=
1780 stats
.rx_1024_to_1518_byte_packets
;
1781 edev
->stats
.rx_1519_to_1522_byte_packets
=
1782 stats
.rx_1519_to_1522_byte_packets
;
1783 edev
->stats
.rx_1519_to_2047_byte_packets
=
1784 stats
.rx_1519_to_2047_byte_packets
;
1785 edev
->stats
.rx_2048_to_4095_byte_packets
=
1786 stats
.rx_2048_to_4095_byte_packets
;
1787 edev
->stats
.rx_4096_to_9216_byte_packets
=
1788 stats
.rx_4096_to_9216_byte_packets
;
1789 edev
->stats
.rx_9217_to_16383_byte_packets
=
1790 stats
.rx_9217_to_16383_byte_packets
;
1791 edev
->stats
.rx_crc_errors
= stats
.rx_crc_errors
;
1792 edev
->stats
.rx_mac_crtl_frames
= stats
.rx_mac_crtl_frames
;
1793 edev
->stats
.rx_pause_frames
= stats
.rx_pause_frames
;
1794 edev
->stats
.rx_pfc_frames
= stats
.rx_pfc_frames
;
1795 edev
->stats
.rx_align_errors
= stats
.rx_align_errors
;
1796 edev
->stats
.rx_carrier_errors
= stats
.rx_carrier_errors
;
1797 edev
->stats
.rx_oversize_packets
= stats
.rx_oversize_packets
;
1798 edev
->stats
.rx_jabbers
= stats
.rx_jabbers
;
1799 edev
->stats
.rx_undersize_packets
= stats
.rx_undersize_packets
;
1800 edev
->stats
.rx_fragments
= stats
.rx_fragments
;
1801 edev
->stats
.tx_64_byte_packets
= stats
.tx_64_byte_packets
;
1802 edev
->stats
.tx_65_to_127_byte_packets
= stats
.tx_65_to_127_byte_packets
;
1803 edev
->stats
.tx_128_to_255_byte_packets
=
1804 stats
.tx_128_to_255_byte_packets
;
1805 edev
->stats
.tx_256_to_511_byte_packets
=
1806 stats
.tx_256_to_511_byte_packets
;
1807 edev
->stats
.tx_512_to_1023_byte_packets
=
1808 stats
.tx_512_to_1023_byte_packets
;
1809 edev
->stats
.tx_1024_to_1518_byte_packets
=
1810 stats
.tx_1024_to_1518_byte_packets
;
1811 edev
->stats
.tx_1519_to_2047_byte_packets
=
1812 stats
.tx_1519_to_2047_byte_packets
;
1813 edev
->stats
.tx_2048_to_4095_byte_packets
=
1814 stats
.tx_2048_to_4095_byte_packets
;
1815 edev
->stats
.tx_4096_to_9216_byte_packets
=
1816 stats
.tx_4096_to_9216_byte_packets
;
1817 edev
->stats
.tx_9217_to_16383_byte_packets
=
1818 stats
.tx_9217_to_16383_byte_packets
;
1819 edev
->stats
.tx_pause_frames
= stats
.tx_pause_frames
;
1820 edev
->stats
.tx_pfc_frames
= stats
.tx_pfc_frames
;
1821 edev
->stats
.tx_lpi_entry_count
= stats
.tx_lpi_entry_count
;
1822 edev
->stats
.tx_total_collisions
= stats
.tx_total_collisions
;
1823 edev
->stats
.brb_truncates
= stats
.brb_truncates
;
1824 edev
->stats
.brb_discards
= stats
.brb_discards
;
1825 edev
->stats
.tx_mac_ctrl_frames
= stats
.tx_mac_ctrl_frames
;
1829 struct rtnl_link_stats64
*qede_get_stats64(struct net_device
*dev
,
1830 struct rtnl_link_stats64
*stats
)
1832 struct qede_dev
*edev
= netdev_priv(dev
);
1834 qede_fill_by_demand_stats(edev
);
1836 stats
->rx_packets
= edev
->stats
.rx_ucast_pkts
+
1837 edev
->stats
.rx_mcast_pkts
+
1838 edev
->stats
.rx_bcast_pkts
;
1839 stats
->tx_packets
= edev
->stats
.tx_ucast_pkts
+
1840 edev
->stats
.tx_mcast_pkts
+
1841 edev
->stats
.tx_bcast_pkts
;
1843 stats
->rx_bytes
= edev
->stats
.rx_ucast_bytes
+
1844 edev
->stats
.rx_mcast_bytes
+
1845 edev
->stats
.rx_bcast_bytes
;
1847 stats
->tx_bytes
= edev
->stats
.tx_ucast_bytes
+
1848 edev
->stats
.tx_mcast_bytes
+
1849 edev
->stats
.tx_bcast_bytes
;
1851 stats
->tx_errors
= edev
->stats
.tx_err_drop_pkts
;
1852 stats
->multicast
= edev
->stats
.rx_mcast_pkts
+
1853 edev
->stats
.rx_bcast_pkts
;
1855 stats
->rx_fifo_errors
= edev
->stats
.no_buff_discards
;
1857 stats
->collisions
= edev
->stats
.tx_total_collisions
;
1858 stats
->rx_crc_errors
= edev
->stats
.rx_crc_errors
;
1859 stats
->rx_frame_errors
= edev
->stats
.rx_align_errors
;
1864 #ifdef CONFIG_QED_SRIOV
1865 static int qede_get_vf_config(struct net_device
*dev
, int vfidx
,
1866 struct ifla_vf_info
*ivi
)
1868 struct qede_dev
*edev
= netdev_priv(dev
);
1873 return edev
->ops
->iov
->get_config(edev
->cdev
, vfidx
, ivi
);
1876 static int qede_set_vf_rate(struct net_device
*dev
, int vfidx
,
1877 int min_tx_rate
, int max_tx_rate
)
1879 struct qede_dev
*edev
= netdev_priv(dev
);
1881 return edev
->ops
->iov
->set_rate(edev
->cdev
, vfidx
, min_tx_rate
,
1885 static int qede_set_vf_spoofchk(struct net_device
*dev
, int vfidx
, bool val
)
1887 struct qede_dev
*edev
= netdev_priv(dev
);
1892 return edev
->ops
->iov
->set_spoof(edev
->cdev
, vfidx
, val
);
1895 static int qede_set_vf_link_state(struct net_device
*dev
, int vfidx
,
1898 struct qede_dev
*edev
= netdev_priv(dev
);
1903 return edev
->ops
->iov
->set_link_state(edev
->cdev
, vfidx
, link_state
);
1907 static void qede_config_accept_any_vlan(struct qede_dev
*edev
, bool action
)
1909 struct qed_update_vport_params params
;
1912 /* Proceed only if action actually needs to be performed */
1913 if (edev
->accept_any_vlan
== action
)
1916 memset(¶ms
, 0, sizeof(params
));
1918 params
.vport_id
= 0;
1919 params
.accept_any_vlan
= action
;
1920 params
.update_accept_any_vlan_flg
= 1;
1922 rc
= edev
->ops
->vport_update(edev
->cdev
, ¶ms
);
1924 DP_ERR(edev
, "Failed to %s accept-any-vlan\n",
1925 action
? "enable" : "disable");
1927 DP_INFO(edev
, "%s accept-any-vlan\n",
1928 action
? "enabled" : "disabled");
1929 edev
->accept_any_vlan
= action
;
1933 static int qede_vlan_rx_add_vid(struct net_device
*dev
, __be16 proto
, u16 vid
)
1935 struct qede_dev
*edev
= netdev_priv(dev
);
1936 struct qede_vlan
*vlan
, *tmp
;
1939 DP_VERBOSE(edev
, NETIF_MSG_IFUP
, "Adding vlan 0x%04x\n", vid
);
1941 vlan
= kzalloc(sizeof(*vlan
), GFP_KERNEL
);
1943 DP_INFO(edev
, "Failed to allocate struct for vlan\n");
1946 INIT_LIST_HEAD(&vlan
->list
);
1948 vlan
->configured
= false;
1950 /* Verify vlan isn't already configured */
1951 list_for_each_entry(tmp
, &edev
->vlan_list
, list
) {
1952 if (tmp
->vid
== vlan
->vid
) {
1953 DP_VERBOSE(edev
, (NETIF_MSG_IFUP
| NETIF_MSG_IFDOWN
),
1954 "vlan already configured\n");
1960 /* If interface is down, cache this VLAN ID and return */
1961 if (edev
->state
!= QEDE_STATE_OPEN
) {
1962 DP_VERBOSE(edev
, NETIF_MSG_IFDOWN
,
1963 "Interface is down, VLAN %d will be configured when interface is up\n",
1966 edev
->non_configured_vlans
++;
1967 list_add(&vlan
->list
, &edev
->vlan_list
);
1972 /* Check for the filter limit.
1973 * Note - vlan0 has a reserved filter and can be added without
1974 * worrying about quota
1976 if ((edev
->configured_vlans
< edev
->dev_info
.num_vlan_filters
) ||
1978 rc
= qede_set_ucast_rx_vlan(edev
,
1979 QED_FILTER_XCAST_TYPE_ADD
,
1982 DP_ERR(edev
, "Failed to configure VLAN %d\n",
1987 vlan
->configured
= true;
1989 /* vlan0 filter isn't consuming out of our quota */
1991 edev
->configured_vlans
++;
1993 /* Out of quota; Activate accept-any-VLAN mode */
1994 if (!edev
->non_configured_vlans
)
1995 qede_config_accept_any_vlan(edev
, true);
1997 edev
->non_configured_vlans
++;
2000 list_add(&vlan
->list
, &edev
->vlan_list
);
2005 static void qede_del_vlan_from_list(struct qede_dev
*edev
,
2006 struct qede_vlan
*vlan
)
2008 /* vlan0 filter isn't consuming out of our quota */
2009 if (vlan
->vid
!= 0) {
2010 if (vlan
->configured
)
2011 edev
->configured_vlans
--;
2013 edev
->non_configured_vlans
--;
2016 list_del(&vlan
->list
);
static int qede_configure_vlan_filters(struct qede_dev *edev)
{
	int rc = 0, real_rc = 0, accept_any_vlan = 0;
	struct qed_dev_eth_info *dev_info;
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return 0;

	dev_info = &edev->dev_info;

	/* Configure non-configured vlans */
	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (vlan->configured)
			continue;

		/* We have used all our credits, now enable accept_any_vlan */
		if ((vlan->vid != 0) &&
		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
			accept_any_vlan = 1;
			continue;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);

		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
					    vlan->vid);
		if (rc) {
			DP_ERR(edev, "Failed to configure VLAN %u\n",
			       vlan->vid);
			real_rc = rc;
			continue;
		}

		vlan->configured = true;
		/* vlan0 filter doesn't consume our VLAN filter's quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans--;
			edev->configured_vlans++;
		}
	}

	/* enable accept_any_vlan mode if we have more VLANs than credits,
	 * or remove accept_any_vlan mode if we've actually removed
	 * a non-configured vlan, and all remaining vlans are truly configured.
	 */
	if (accept_any_vlan)
		qede_config_accept_any_vlan(edev, true);
	else if (!edev->non_configured_vlans)
		qede_config_accept_any_vlan(edev, false);

	return real_rc;
}
static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_vlan *vlan = NULL;
	int rc;

	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);

	/* Find whether entry exists */
	list_for_each_entry(vlan, &edev->vlan_list, list)
		if (vlan->vid == vid)
			break;

	if (!vlan || (vlan->vid != vid)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Vlan isn't configured\n");
		return 0;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		/* As interface is already down, we don't have a VPORT
		 * instance to remove vlan filter. So just update vlan list
		 */
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Interface is down, removing VLAN from list only\n");
		qede_del_vlan_from_list(edev, vlan);
		return 0;
	}

	/* Remove vlan */
	if (vlan->configured) {
		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
					    vid);
		if (rc) {
			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
			return -EINVAL;
		}
	}

	qede_del_vlan_from_list(edev, vlan);

	/* We have removed a VLAN - try to see if we can
	 * configure non-configured VLAN from the list.
	 */
	rc = qede_configure_vlan_filters(edev);

	return rc;
}
static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
{
	struct qede_vlan *vlan = NULL;

	if (list_empty(&edev->vlan_list))
		return;

	list_for_each_entry(vlan, &edev->vlan_list, list) {
		if (!vlan->configured)
			continue;

		vlan->configured = false;

		/* vlan0 filter isn't consuming out of our quota */
		if (vlan->vid != 0) {
			edev->non_configured_vlans++;
			edev->configured_vlans--;
		}

		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "marked vlan %d as non-configured\n", vlan->vid);
	}

	edev->accept_any_vlan = false;
}
static int qede_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qede_dev *edev = netdev_priv(dev);
	netdev_features_t changes = features ^ dev->features;
	bool need_reload = false;

	/* No action needed if hardware GRO is disabled during driver load */
	if (changes & NETIF_F_GRO) {
		if (dev->features & NETIF_F_GRO)
			need_reload = !edev->gro_disable;
		else
			need_reload = edev->gro_disable;
	}

	if (need_reload && netif_running(edev->ndev)) {
		dev->features = features;
		qede_reload(edev, NULL, NULL);
		return 1;
	}

	return 0;
}
static void qede_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	u16 t_port = ntohs(ti->port);

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (edev->vxlan_dst_port)
			return;

		edev->vxlan_dst_port = t_port;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
			   t_port);

		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (edev->geneve_dst_port)
			return;

		edev->geneve_dst_port = t_port;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
			   t_port);
		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
		break;
	default:
		return;
	}

	schedule_delayed_work(&edev->sp_task, 0);
}
static void qede_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct qede_dev *edev = netdev_priv(dev);
	u16 t_port = ntohs(ti->port);

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (t_port != edev->vxlan_dst_port)
			return;

		edev->vxlan_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
			   t_port);

		set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (t_port != edev->geneve_dst_port)
			return;

		edev->geneve_dst_port = 0;

		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
			   t_port);
		set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
		break;
	default:
		return;
	}

	schedule_delayed_work(&edev->sp_task, 0);
}
/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48

static netdev_features_t qede_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	if (skb->encapsulation) {
		u8 l4_proto = 0;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			l4_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			l4_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return features;
		}

		/* Disable offloads for geneve tunnels, as HW can't parse
		 * the geneve header which has option length greater than 32B.
		 */
		if ((l4_proto == IPPROTO_UDP) &&
		    ((skb_inner_mac_header(skb) -
		      skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
			return features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_GSO_MASK);
	}

	return features;
}
static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
};
/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues, info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	edev->num_tc = edev->dev_info.num_tc;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}
static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	u32 hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	ndev->netdev_ops = &qede_netdev_ops;

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
		       NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
		       NETIF_F_GSO_GRE_CSUM;
	ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
				NETIF_F_TSO6 | NETIF_F_GSO_GRE |
				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
				NETIF_F_GSO_UDP_TUNNEL_CSUM |
				NETIF_F_GSO_GRE_CSUM;

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
}
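/* Note on the MTU range set above: ETH_ZLEN (60) - ETH_HLEN (14) = 46, i.e.
 * the smallest payload that still produces a minimum-sized Ethernet frame,
 * while QEDE_MAX_JUMBO_PACKET_SIZE caps jumbo frames at 9600 bytes.
 */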
/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 *	'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 *	and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 *	module. VERBOSE prints are for tracking the specific flow in low level.
 * Notice that the level should be that of the lowest required logs.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
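/* Illustrative decoding of the "debug" module parameter, assuming
 * QED_LOG_VERBOSE_MASK covers b0-b29, QED_LOG_INFO_MASK is b30 and
 * QED_LOG_NOTICE_MASK is b31:
 *   debug = 0x80000000 -> dp_level = NOTICE,  dp_module = 0
 *   debug = 0x40000000 -> dp_level = INFO,    dp_module = 0
 *   debug = 0x00000003 -> dp_level = VERBOSE, dp_module = 0x3 (modules 0, 1)
 */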
static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			kfree(fp->rxq);
			kfree(fp->txqs);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}
static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs),
					   GFP_KERNEL);
			if (!fp->txqs) {
				DP_NOTICE(edev,
					  "TXQ array allocation failed\n");
				goto err;
			}
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq) {
				DP_NOTICE(edev,
					  "RXQ struct allocation failed\n");
				goto err;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}
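/* Example of the resulting layout (hypothetical counts): with fp_num_rx = 2,
 * fp_num_tx = 2 and QEDE_QUEUE_CNT() = 6, the fp_array is filled as
 *   [RX, RX, COMBINED, COMBINED, TX, TX]
 * so Rx-capable entries occupy the low indices and Tx-capable entries the
 * high ones, keeping the rxq_id/txq index assignment in qede_init_fp()
 * sequential.
 */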
static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);
	struct qed_dev *cdev = edev->cdev;

	mutex_lock(&edev->qede_lock);

	if (edev->state == QEDE_STATE_OPEN) {
		if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
			qede_config_rx_mode(edev->ndev);
	}

	if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
		struct qed_tunn_params tunn_params;

		memset(&tunn_params, 0, sizeof(tunn_params));
		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = edev->vxlan_dst_port;
		qed_ops->tunn_config(cdev, &tunn_params);
	}

	if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
		struct qed_tunn_params tunn_params;

		memset(&tunn_params, 0, sizeof(tunn_params));
		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = edev->geneve_dst_port;
		qed_ops->tunn_config(cdev, &tunn_params);
	}

	mutex_unlock(&edev->qede_lock);
}
static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;

	memset(&pf_params, 0, sizeof(struct qed_pf_params));
	pf_params.eth_pf_params.num_cons = 128;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}
enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
				   dp_level);
	if (!edev) {
		rc = -ENOMEM;
		goto err2;
	}

	if (is_vf)
		edev->flags |= QEDE_FLAG_IS_VF;

	qede_init_ndev(edev);

	rc = qede_roce_dev_add(edev);
	if (rc)
		goto err3;

	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err4;
	}

	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

	qede_set_dcbnl_ops(edev->ndev);

	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);
	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	DP_INFO(edev, "Ending successfully qede probe\n");

	return 0;

err4:
	qede_roce_dev_remove(edev);
err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}
enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	cancel_delayed_work_sync(&edev->sp_task);

	unregister_netdev(ndev);

	qede_roce_dev_remove(edev);

	edev->ops->common->set_power_state(cdev, PCI_D0);

	pci_set_drvdata(pdev, NULL);

	free_netdev(ndev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	qed_ops->common->remove(cdev);

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}
/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}
static void qede_free_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}
static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		__free_page(data);
	}
}
static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i;

	if (edev->gro_disable)
		return;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;

		if (replace_buf->data) {
			dma_unmap_page(&edev->pdev->dev,
				       replace_buf->mapping,
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(replace_buf->data);
		}
	}
}
static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	qede_free_sge_mem(edev, rxq);

	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data)) {
		DP_NOTICE(edev, "Failed to allocate Rx data [page]\n");
		return -ENOMEM;
	}

	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
	 */
	mapping = dma_map_page(&edev->pdev->dev, data, 0,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		__free_page(data);
		DP_NOTICE(edev, "Failed to map Rx buffer\n");
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));

	rxq->sw_rx_prod++;

	return 0;
}
static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	dma_addr_t mapping;
	int i;

	if (edev->gro_disable)
		return 0;

	if (edev->ndev->mtu > PAGE_SIZE) {
		edev->gro_disable = 1;
		return 0;
	}

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;

		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
		if (unlikely(!replace_buf->data)) {
			DP_NOTICE(edev,
				  "Failed to allocate TPA skb pool [replacement buffer]\n");
			goto err;
		}

		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
				       rxq->rx_buf_size, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
			DP_NOTICE(edev,
				  "Failed to map TPA replacement buffer\n");
			goto err;
		}

		replace_buf->mapping = mapping;
		tpa_info->replace_buf.page_offset = 0;

		tpa_info->replace_buf_mapping = mapping;
		tpa_info->agg_state = QEDE_AGG_STATE_NONE;
	}

	return 0;
err:
	qede_free_sge_mem(edev, rxq);
	edev->gro_disable = 1;
	return -ENOMEM;
}
/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	if (rxq->rx_buf_size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE;

	/* Segment size to split a page in multiple equal parts */
	rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(edev, rxq);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	rc = qede_alloc_sge_mem(edev, rxq);
err:
	return rc;
}
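/* Sizing example for the buffer-segment math above (assuming 4K pages and a
 * 1500-byte MTU): rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + 1500 is a bit
 * over 1.5K, which rounds up to a 2048-byte rx_buf_seg_size, so each page is
 * split into two equal Rx segments. With an MTU large enough that
 * rx_buf_size would exceed PAGE_SIZE, the size is clamped and a whole page
 * backs a single buffer.
 */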
static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	kfree(txq->sw_tx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	union eth_tx_bd_types *p_virt;
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
	txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
	if (!txq->sw_tx_ring) {
		DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
		goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    NUM_TX_BDS_MAX,
					    sizeof(*p_virt), &txq->tx_pbl);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}
/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int tc;

	qede_free_mem_sb(edev, fp->sb_info);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_TX)
		for (tc = 0; tc < edev->num_tc; tc++)
			qede_free_mem_txq(edev, &fp->txqs[tc]);
}
/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains status block, one rx queue and/or multiple per-TC tx queues.
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc, tc;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto err;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto err;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		for (tc = 0; tc < edev->num_tc; tc++) {
			rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]);
			if (rc)
				goto err;
		}
	}

	return 0;
err:
	return rc;
}
static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}
/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0, tc;
	struct qede_fastpath *fp;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		memset((void *)&fp->napi, 0, sizeof(fp->napi));

		memset((void *)fp->sb_info, 0, sizeof(*fp->sb_info));

		if (fp->type & QEDE_FASTPATH_RX) {
			memset((void *)fp->rxq, 0, sizeof(*fp->rxq));
			fp->rxq->rxq_id = rxq_index++;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			memset((void *)fp->txqs, 0,
			       (edev->num_tc * sizeof(*fp->txqs)));
			for (tc = 0; tc < edev->num_tc; tc++) {
				fp->txqs[tc].index = txq_index +
				    tc * QEDE_TSS_COUNT(edev);
				if (edev->dev_info.is_legacy)
					fp->txqs[tc].is_legacy = true;
			}
			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
}
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}
static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}
static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanitize number of interrupts == number of prepared RSS queues */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}
static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				struct qede_tx_queue *txq = &fp->txqs[tc];

				rc = qede_drain_txq(edev, txq, true);
				if (rc)
					return rc;
			}
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < edev->num_tc; tc++) {
				struct qed_stop_txq_params tx_params;
				u16 val;

				tx_params.rss_id = i;
				val = edev->fp_array[i].txqs[tc].index;
				tx_params.tx_queue_id = val;
				rc = edev->ops->q_tx_stop(cdev, &tx_params);
				if (rc) {
					DP_ERR(edev, "Failed to stop TXQ #%d\n",
					       tx_params.tx_queue_id);
					return rc;
				}
			}
		}

		/* Stop the Rx Queue */
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			memset(&rx_params, 0, sizeof(rx_params));
			rx_params.rss_id = i;
			rx_params.rx_queue_id = edev->fp_array[i].rxq->rxq_id;

			rc = edev->ops->q_rx_stop(cdev, &rx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_update_vport_params vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_start_vport_params start = {0};
	bool reset_rss_indir = false;
	int rc, tc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);
	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt,
						   &rxq->hw_rxq_prod_addr);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				return rc;
			}

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;

		for (tc = 0; tc < edev->num_tc; tc++) {
			struct qede_tx_queue *txq = &fp->txqs[tc];

			p_phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
			page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);

			memset(&q_params, 0, sizeof(q_params));
			q_params.rss_id = i;
			q_params.queue_id = txq->index;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = edev->ops->q_tx_start(cdev, &q_params,
						   p_phys_table, page_cnt,
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start TXQ #%d failed %d\n",
				       txq->index, rc);
				return rc;
			}

			txq->hw_cons_ptr =
			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = start.vport_id;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params.update_tx_switching_flg = 1;
		vport_update_params.tx_switching_flg = 1;
	}

	/* Fill struct with RSS params */
	if (QEDE_RSS_COUNT(edev) > 1) {
		vport_update_params.update_rss_flg = 1;

		/* Need to validate current RSS config uses valid entries */
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			if (edev->rss_params.rss_ind_table[i] >=
			    QEDE_RSS_COUNT(edev)) {
				reset_rss_indir = true;
				break;
			}
		}

		if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
		    reset_rss_indir) {
			u16 indir_val;
			u16 val;

			for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
				val = QEDE_RSS_COUNT(edev);
				indir_val = ethtool_rxfh_indir_default(i, val);
				edev->rss_params.rss_ind_table[i] = indir_val;
			}
			edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
		}

		if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
			netdev_rss_key_fill(edev->rss_params.rss_key,
					    sizeof(edev->rss_params.rss_key));
			edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
		}

		if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
			edev->rss_params.rss_caps = QED_RSS_IPV4 |
						    QED_RSS_IPV6 |
						    QED_RSS_IPV4_TCP |
						    QED_RSS_IPV6_TCP;
			edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
		}

		memcpy(&vport_update_params.rss_params, &edev->rss_params,
		       sizeof(vport_update_params.rss_params));
	} else {
		memset(&vport_update_params.rss_params, 0,
		       sizeof(vport_update_params.rss_params));
	}

	rc = edev->ops->vport_update(cdev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}
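/* Example of the RSS indirection default used above (illustrative): with
 * QEDE_RSS_COUNT() == 4, ethtool_rxfh_indir_default(i, 4) fills the
 * QED_RSS_IND_TABLE_SIZE entries as 0,1,2,3,0,1,2,3,..., i.e. entry i maps
 * to queue i % 4. The table is only regenerated when it was never
 * initialized or when an existing entry points at a queue index that no
 * longer exists after a queue-count change.
 */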
static int qede_set_mcast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char *mac, int num_macs)
{
	struct qed_filter_params filter_cmd;
	int i;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_MCAST;
	filter_cmd.filter.mcast.type = opcode;
	filter_cmd.filter.mcast.num = num_macs;

	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
		ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}
enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};

static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	qede_roce_dev_event_close(edev);
	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_CLOSED;

	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	mutex_unlock(&edev->qede_lock);
	DP_INFO(edev, "Ending qede unload\n");
}
enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
};

static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
{
	struct qed_link_params link_params;
	struct qed_link_output link_output;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	rc = qede_set_num_queues(edev);
	if (rc)
		goto err0;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto err0;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
		QEDE_QUEUE_CNT(edev), edev->num_tc);

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	mutex_lock(&edev->qede_lock);
	edev->state = QEDE_STATE_OPEN;
	mutex_unlock(&edev->qede_lock);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
	qede_roce_dev_event_open(edev);
	qede_link_update(edev, &link_output);

	DP_INFO(edev, "Ending successfully qede load\n");

	return 0;

err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
err0:
	return rc;
}
void qede_reload(struct qede_dev *edev,
		 void (*func)(struct qede_dev *, union qede_reload_args *),
		 union qede_reload_args *args)
{
	qede_unload(edev, QEDE_UNLOAD_NORMAL);
	/* Call function handler to update parameters
	 * needed for function load.
	 */
	if (func)
		func(edev, args);

	qede_load(edev, QEDE_LOAD_RELOAD);

	mutex_lock(&edev->qede_lock);
	qede_config_rx_mode(edev->ndev);
	mutex_unlock(&edev->qede_lock);
}
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL);

	return 0;
}
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
		}
	}
}
static int qede_set_mac_addr(struct net_device *ndev, void *p)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int rc;

	ASSERT_RTNL(); /* @@@TBD To be removed */

	DP_INFO(edev, "Set_mac_addr called\n");

	if (!is_valid_ether_addr(addr->sa_data)) {
		DP_NOTICE(edev, "The MAC address is not valid\n");
		return -EFAULT;
	}

	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
		DP_NOTICE(edev, "qed prevents setting MAC\n");
		return -EINVAL;
	}

	ether_addr_copy(ndev->dev_addr, addr->sa_data);

	if (!netif_running(ndev)) {
		DP_NOTICE(edev, "The device is currently down\n");
		return 0;
	}

	/* Remove the previous primary mac */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   edev->primary_mac);
	if (rc)
		return rc;

	/* Add MAC filter according to the new unicast HW MAC address */
	ether_addr_copy(edev->primary_mac, ndev->dev_addr);
	return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
				     edev->primary_mac);
}
static int
qede_configure_mcast_filtering(struct net_device *ndev,
			       enum qed_filter_rx_mode_type *accept_flags)
{
	struct qede_dev *edev = netdev_priv(ndev);
	unsigned char *mc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc = 0, mc_count;
	size_t size;

	size = 64 * ETH_ALEN;

	mc_macs = kzalloc(size, GFP_KERNEL);
	if (!mc_macs) {
		DP_NOTICE(edev,
			  "Failed to allocate memory for multicast MACs\n");
		rc = -ENOMEM;
		goto exit;
	}

	temp = mc_macs;

	/* Remove all previously configured MAC filters */
	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
				   mc_macs, 1);
	if (rc)
		goto exit;

	netif_addr_lock_bh(ndev);

	mc_count = netdev_mc_count(ndev);
	if (mc_count < 64) {
		netdev_for_each_mc_addr(ha, ndev) {
			ether_addr_copy(temp, ha->addr);
			temp += ETH_ALEN;
		}
	}

	netif_addr_unlock_bh(ndev);

	/* Check for all multicast @@@TBD resource allocation */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (mc_count > 64)) {
		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
	} else {
		/* Add all multicast MAC filters */
		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
					   mc_macs, mc_count);
	}

exit:
	kfree(mc_macs);
	return rc;
}
static void qede_set_rx_mode(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_INFO(edev, "qede_set_rx_mode called\n");

	if (edev->state != QEDE_STATE_OPEN) {
		DP_INFO(edev,
			"qede_set_rx_mode called while interface is down\n");
	} else {
		set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
		schedule_delayed_work(&edev->sp_task, 0);
	}
}
/* Must be called with qede_lock held */
static void qede_config_rx_mode(struct net_device *ndev)
{
	enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_filter_params rx_mode;
	unsigned char *uc_macs, *temp;
	struct netdev_hw_addr *ha;
	int rc, uc_count, i;
	size_t size;

	netif_addr_lock_bh(ndev);

	uc_count = netdev_uc_count(ndev);
	size = uc_count * ETH_ALEN;

	uc_macs = kzalloc(size, GFP_ATOMIC);
	if (!uc_macs) {
		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
		netif_addr_unlock_bh(ndev);
		return;
	}

	temp = uc_macs;
	netdev_for_each_uc_addr(ha, ndev) {
		ether_addr_copy(temp, ha->addr);
		temp += ETH_ALEN;
	}

	netif_addr_unlock_bh(ndev);

	/* Configure the struct for the Rx mode */
	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
	rx_mode.type = QED_FILTER_TYPE_RX_MODE;

	/* Remove all previous unicast secondary macs and multicast macs
	 * (configure / leave the primary mac)
	 */
	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
				   edev->primary_mac);
	if (rc)
		goto out;

	/* Check for promiscuous */
	if ((ndev->flags & IFF_PROMISC) ||
	    (uc_count > edev->dev_info.num_mac_filters - 1)) {
		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
	} else {
		/* Add MAC filters according to the unicast secondary macs */
		temp = uc_macs;
		for (i = 0; i < uc_count; i++) {
			rc = qede_set_ucast_rx_mac(edev,
						   QED_FILTER_XCAST_TYPE_ADD,
						   temp);
			if (rc)
				goto out;

			temp += ETH_ALEN;
		}

		rc = qede_configure_mcast_filtering(ndev, &accept_flags);
		if (rc)
			goto out;
	}

	/* take care of VLAN mode */
	if (ndev->flags & IFF_PROMISC) {
		qede_config_accept_any_vlan(edev, true);
	} else if (!edev->non_configured_vlans) {
		/* It's possible that accept_any_vlan mode is set due to a
		 * previous setting of IFF_PROMISC. If vlan credits are
		 * sufficient, disable accept_any_vlan.
		 */
		qede_config_accept_any_vlan(edev, false);
	}

	rx_mode.filter.accept_flags = accept_flags;
	edev->ops->filter_config(edev->cdev, &rx_mode);
out:
	kfree(uc_macs);
}