/* Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"

/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);

static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");

static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the
 * device registers on this platform are implicitly strongly ordered with
 * respect to memory accesses. So writeq_relaxed() and readq_relaxed() are
 * safe to use with no memory barriers in this driver.  The readq()/writeq()
 * functions add explicit ordering operations which in this case are
 * redundant, and only add overhead.
 */

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}
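
/* Example (illustrative): per-queue registers are replicated once per
 * queue, NIC_Q_NUM_SHIFT bits apart, so the helpers above compute
 *
 *	addr = nic->reg_base + offset + (qidx << NIC_Q_NUM_SHIFT)
 *
 * e.g. ringing completion queue 3's doorbell is a single relaxed 64-bit
 * store:
 *
 *	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 3, processed_cqe);
 */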

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EINVAL;
		}
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}
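
/* Example (illustrative): a mailbox message is a 16-byte union written
 * to the PF as two 64-bit words; the PF's ACK/NACK arrives via the
 * mailbox interrupt and is observed through nic->pf_acked/pf_nacked.
 * A minimal request looks like:
 *
 *	union nic_mbx mbx = {};
 *
 *	mbx.msg.msg = NIC_MBOX_MSG_READY;
 *	err = nicvf_send_msg_to_pf(nic, &mbx);
 *	(nonzero err: no ACK within NIC_MBOX_MSG_TIMEOUT)
 */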

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}

static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		nic->mac_type = mbx.link_status.mac_type;
		if (nic->link_up) {
			netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
				    nic->netdev->name, nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full duplex" : "Half duplex");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "%s: Link is Down\n",
				    nic->netdev->name);
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used while packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used while packet reception, to handover packet
		 * to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PFC:
		nic->pfc.autoneg = mbx.pfc.autoneg;
		nic->pfc.fc_rx = mbx.pfc.fc_rx;
		nic->pfc.fc_tx = mbx.pfc.fc_tx;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}

void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}
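
/* Worked example (illustrative): the indirection table is pushed to the
 * PF in mailbox-sized chunks.  Assuming a 128-entry table and an
 * RSS_IND_TBL_LEN_PER_MBX_MSG of 8, the loop above sends 16 messages:
 * the first as NIC_MBOX_MSG_RSS_CFG (tbl_offset == 0) and the remaining
 * fifteen as NIC_MBOX_MSG_RSS_CFG_CONT.
 */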

void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}

static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}
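
/* Worked example (illustrative): with rss_size = 128,
 * rounddown_pow_of_two(128) = 128 and ilog2(128) = 7, so hash_bits = 7
 * and the low 7 bits of the RSS hash index the 128-entry indirection
 * table seeded above, spread round-robin across nic->rx_queues by
 * ethtool_rxfh_indir_default().
 */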

/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;

	tx_queues = nic->tx_queues + nic->xdp_tx_queues;
	if (tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}
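
/* Worked example (illustrative): queues that do not fit in one Qset
 * spill into secondary Qsets.  Assuming MAX_RCV_QUEUES_PER_QS and
 * MAX_SND_QUEUES_PER_QS are both 8, a request for 12 Rx and 12 Tx
 * queues ends up as:
 *
 *	primary Qset:     8 RQs, 8 SQs
 *	secondary Qset 0: 4 RQs, 4 SQs, cq_cnt = max(4, 4) = 4
 */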

/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev.
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	return 0;
}

static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
				struct sk_buff **skb)
{
	struct xdp_buff xdp;
	struct page *page;
	u32 action;
	u16 len, offset = 0;
	u64 dma_addr, cpu_addr;
	void *orig_data;

	/* Retrieve packet buffer's DMA address and length */
	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
	dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));

	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
	if (!cpu_addr)
		return false;
	cpu_addr = (u64)phys_to_virt(cpu_addr);
	page = virt_to_page((void *)cpu_addr);

	xdp.data_hard_start = page_address(page);
	xdp.data = (void *)cpu_addr;
	xdp.data_end = xdp.data + len;
	orig_data = xdp.data;

	rcu_read_lock();
	action = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	/* Check if XDP program has changed headers */
	if (orig_data != xdp.data) {
		len = xdp.data_end - xdp.data;
		offset = orig_data - xdp.data;
		dma_addr -= offset;
	}

	switch (action) {
	case XDP_PASS:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}

		/* Build SKB and pass on packet to network stack */
		*skb = build_skb(xdp.data,
				 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
		if (!*skb)
			put_page(page);
		else
			skb_put(*skb, len);
		return false;
	case XDP_TX:
		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
		return true;
	default:
		bpf_warn_invalid_xdp_action(action);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
		/* fall through */
	case XDP_DROP:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}
		put_page(page);
		return true;
	}
	return false;
}
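
/* Example (illustrative): an XDP program may move xdp.data into the
 * headroom (e.g. via bpf_xdp_adjust_head()).  If the program pulled the
 * packet start 14 bytes earlier, the fixup above yields
 *
 *	offset = orig_data - xdp.data = 14
 *	len grows by 14, and dma_addr is moved back by 14
 *
 * so both CPU and device views track the new start of packet.  A
 * page_ref_count() of 1 means the buffer is not being recycled, hence
 * the DMA unmap before memory is handed to the stack.
 */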

static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int budget, int *subdesc_cnt,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct page *page;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	/* Check for errors */
	if (cqe_tx->send_status)
		nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);

	/* Is this a XDP designated Tx queue */
	if (sq->is_xdp) {
		page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
		/* Check if it's recycled page or else unmap DMA mapping */
		if (page && (page_ref_count(page) == 1))
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);

		/* Release page reference for recycling */
		if (page)
			put_page(page);
		sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		return;
	}

	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and free them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
			*subdesc_cnt += tso_sqe->subdesc_cnt + 1;
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);
		}
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		prefetch(skb);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
		napi_consume_skb(skb, budget);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of SW TSO on 88xx, only last segment will have
		 * a SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			*subdesc_cnt += hdr->subdesc_cnt + 1;
	}
}

static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}

static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf *snic = nic;
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	if (cqe_rx->err_level || cqe_rx->err_opcode) {
		err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
		if (err && !cqe_rx->rb_cnt)
			return;
	}

	/* For XDP, ignore pkts spanning multiple pages */
	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
		/* Packet consumed by XDP */
		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
			return;
	} else {
		skb = nicvf_get_rcv_skb(snic, cqe_rx,
					nic->xdp_prog ? true : false);
	}

	if (!skb)
		return;

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
			    skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}

static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	int subdesc_cnt = 0;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	struct snd_queue *sq = &qs->sq[cq_idx];
	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
			work_done++;
		break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
					      budget, &subdesc_cnt,
					      &tx_pkts, &tx_bytes);
			tx_done++;
		break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
		break;
		}
		processed_cqe++;
	}

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Update SQ's descriptor free count */
	if (subdesc_cnt)
		nicvf_put_sq_desc(sq, subdesc_cnt);

	txq_idx = nicvf_netdev_qidx(nic, cq_idx);
	/* Handle XDP TX queues */
	if (nic->pnicvf->xdp_prog) {
		if (txq_idx < nic->pnicvf->xdp_tx_queues) {
			nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
			goto out;
		}
		nic = nic->pnicvf;
		txq_idx -= nic->pnicvf->xdp_tx_queues;
	}

	/* Wakeup TXQ if its stopped earlier due to SQ full */
	if (tx_done ||
	    (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev, txq_idx);
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		/* To read updated queue and carrier status */
		smp_mb();
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_wake_queue(txq);
			nic = nic->pnicvf;
			this_cpu_inc(nic->drv_stats->txq_wake);
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit queue wakeup SQ%d\n",
					    netdev->name, txq_idx);
		}
	}

out:
	spin_unlock_bh(&cq->lock);
	return work_done;
}

static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete_done(napi, work_done);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}
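
/* Note on the re-arm sequence above (a sketch of the intent): the head
 * pointer that was read back is rewritten to NIC_QSET_CQ_0_7_HEAD after
 * the latched interrupt is cleared, which makes the hardware re-evaluate
 * whether CQEs are pending and raise a fresh interrupt for any that
 * arrived between napi_complete_done() and nicvf_enable_intr(), so no
 * completion is left unserviced.
 */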

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}

static void nicvf_dump_intr_status(struct nicvf *nic)
{
	if (netif_msg_intr(nic))
		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}

static void nicvf_set_irq_affinity(struct nicvf *nic)
{
	int vec, cpu;

	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;
		/* CQ interrupts */
		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
				      nic->affinity_mask[vec]);
	}
}
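
/* Example (illustrative): with the policy above, CQ vectors start at
 * CPU1 so that CPU0 stays free for RBDR/misc vectors.  On a 4-CPU node
 * with 3 CQ vectors:
 *
 *	vec 0 (cq0) -> CPU1,  vec 1 (cq1) -> CPU2,  vec 2 (cq2) -> CPU3
 *	RBDR, QS-err and mailbox vectors -> CPU0
 *
 * cpumask_local_spread() keeps the chosen CPUs on the device's NUMA
 * node (nic->node) where possible.
 */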

static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}

static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	struct pci_dev *pdev = nic->pdev;
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
		else
			free_irq(pci_irq_vector(pdev, irq), nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	pci_free_irq_vectors(pdev);
	nic->num_vec = 0;
}

/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if it's alive.
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->pdev->msix_enabled)
		return 0;

	/* Enable MSI-X */
	nic->num_vec = pci_msix_vec_count(nic->pdev);
	ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 1;
	}

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}

static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
	struct nicvf *snic;
	struct snd_queue *sq;
	int tmp;

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* In XDP case, initial HW tx queues are used for XDP,
	 * but stack's queue mapping starts at '0', so skip the
	 * Tx queues attached to Rx queues for XDP.
	 */
	if (nic->xdp_prog)
		qid += nic->xdp_tx_queues;

	snic = nic;
	/* Get secondary Qset's SQ structure */
	if (qid >= MAX_SND_QUEUES_PER_QS) {
		tmp = qid / MAX_SND_QUEUES_PER_QS;
		snic = (struct nicvf *)nic->snicvf[tmp - 1];
		if (!snic) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    tmp - 1);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		qid = qid % MAX_SND_QUEUES_PER_QS;
	}

	sq = &snic->qs->sq[qid];
	if (!netif_tx_queue_stopped(txq) &&
	    !nicvf_sq_append_skb(snic, sq, skb, qid)) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb();

		/* Check again, in case another cpu freed descriptors */
		if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
			netif_tx_wake_queue(txq);
		} else {
			this_cpu_inc(nic->drv_stats->txq_stop);
			if (netif_msg_tx_err(nic))
				netdev_warn(netdev,
					    "%s: Transmit ring full, stopping SQ%d\n",
					    netdev->name, qid);
		}
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
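
/* Worked example (illustrative) of the qid remapping above, assuming
 * MAX_SND_QUEUES_PER_QS is 8 and 4 XDP Tx queues are in use:
 *
 *	stack qid 0 -> 0 + 4 = 4              -> primary Qset, SQ4
 *	stack qid 6 -> 6 + 4 = 10, 10 / 8 = 1 -> snicvf[0], SQ = 10 % 8 = 2
 *
 * i.e. SQ0..SQ3 of the primary Qset stay reserved for XDP_TX and stack
 * queues past SQ7 spill into secondary Qsets.
 */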

static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}

int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Teardown secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(pci_irq_vector(nic->pdev, irq));

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;
}
*nic
, int mtu
)
1322 union nic_mbx mbx
= {};
1324 mbx
.frs
.msg
= NIC_MBOX_MSG_SET_MAX_FRS
;
1325 mbx
.frs
.max_frs
= mtu
;
1326 mbx
.frs
.vf_id
= nic
->vf_id
;
1328 return nicvf_send_msg_to_pf(nic
, &mbx
);

int nicvf_open(struct net_device *netdev)
{
	int cpu, err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling and MTU */
	if (!nic->sqs_mode) {
		nicvf_rss_init(nic);
		err = nicvf_update_hw_max_frs(nic, netdev->mtu);
		if (err)
			goto cleanup;

		/* Clear percpu stats */
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
			       sizeof(struct nicvf_drv_stats));
	}

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	/* Send VF config done msg to PF */
	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}

static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);
	int orig_mtu = netdev->mtu;

	netdev->mtu = new_mtu;

	if (!netif_running(netdev))
		return 0;

	if (nicvf_update_hw_max_frs(nic, new_mtu)) {
		netdev->mtu = orig_mtu;
		return -EINVAL;
	}

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->pdev->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}

void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}

void nicvf_update_stats(struct nicvf *nic)
{
	int qidx, cpu;
	u64 tmp_stats = 0;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
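
/* Example (illustrative): each statistics counter is a 64-bit register,
 * so the counter index becomes a byte offset by shifting left 3.
 * Assuming RX_OCTS is index 0 and RX_UCAST index 1:
 *
 *	GET_RX_STATS(RX_OCTS)  reads NIC_VNIC_RX_STAT_0_13 | (0 << 3)
 *	GET_RX_STATS(RX_UCAST) reads NIC_VNIC_RX_STAT_0_13 | (1 << 3)
 */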

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* On T88 pass 2.0, the dummy SQE added for TSO notification
	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed to
	 * by the dummy SQE and results in tx_drops counter being
	 * incremented. Subtracting it from tx_tso counter will give
	 * exact tx_drops counter.
	 */
	if (nic->t88 && nic->hw_tso) {
		for_each_possible_cpu(cpu) {
			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
			tmp_stats += drv_stats->tx_tso;
		}
		stats->tx_drops = tmp_stats - stats->tx_drops;
	}
	stats->tx_frames = stats->tx_ucast_frames +
			   stats->tx_bcast_frames +
			   stats->tx_mcast_frames;
	stats->rx_frames = stats->rx_ucast_frames +
			   stats->rx_bcast_frames +
			   stats->rx_mcast_frames;
	stats->rx_drops = stats->rx_drop_red +
			  stats->rx_drop_overrun;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}

static void nicvf_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = hw_stats->rx_frames;
	stats->rx_dropped = hw_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes;
	stats->tx_packets = hw_stats->tx_frames;
	stats->tx_dropped = hw_stats->tx_drops;
}

static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	if (netif_msg_tx_err(nic))
		netdev_warn(dev, "%s: Transmit timed out, resetting\n",
			    dev->name);

	this_cpu_inc(nic->drv_stats->tx_timeout);
	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
}

static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}

static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
{
	u8 cq_count, txq_count;

	/* Set XDP Tx queue count same as Rx queue count */
	if (!bpf_attached)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = nic->rx_queues;

	/* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
	 * need to be allocated, check how many.
	 */
	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cq_count = max(nic->rx_queues, txq_count);
	if (cq_count > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	/* Set primary Qset's resources */
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);

	/* Update stack */
	nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
}
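
/* Worked example (illustrative) of the sizing above, assuming
 * MAX_CMP_QUEUES_PER_QS is 8, with 6 Rx and 6 Tx queues and XDP attached:
 *
 *	xdp_tx_queues = 6, txq_count = 12, cq_count = max(6, 12) = 12
 *	roundup(12, 8) = 16, 16 / 8 - 1 = 1 secondary Qset
 *
 * so one extra Qset is requested from the PF for the queues that do not
 * fit in the primary Qset.
 */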

static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
{
	struct net_device *dev = nic->netdev;
	bool if_up = netif_running(nic->netdev);
	struct bpf_prog *old_prog;
	bool bpf_attached = false;

	/* For now just support only the usual MTU sized frames */
	if (prog && (dev->mtu > 1500)) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    dev->mtu);
		return -EOPNOTSUPP;
	}

	/* ALL SQs attached to CQs i.e same as RQs, are treated as
	 * XDP Tx queues and more Tx queues are allocated for
	 * network stack to send pkts out.
	 *
	 * No of Tx queues are either same as Rx queues or whatever
	 * is left in max no of queues possible.
	 */
	if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
		netdev_warn(dev,
			    "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
			    nic->max_queues);
		return -ENOMEM;
	}

	if (if_up)
		nicvf_stop(nic->netdev);

	old_prog = xchg(&nic->xdp_prog, prog);
	/* Detach old prog, if any */
	if (old_prog)
		bpf_prog_put(old_prog);

	if (nic->xdp_prog) {
		/* Attach BPF program */
		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
		if (!IS_ERR(nic->xdp_prog))
			bpf_attached = true;
	}

	/* Calculate Tx queues needed for XDP and network stack */
	nicvf_set_xdp_queues(nic, bpf_attached);

	if (if_up) {
		/* Reinitialize interface, clean slate */
		nicvf_open(nic->netdev);
		netif_trans_update(nic->netdev);
	}

	return 0;
}
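
/* Note (illustrative): bpf_prog_add() above takes rx_queues - 1 extra
 * references because each Rx queue conceptually holds its own reference
 * on the attached program; with 4 Rx queues the program ends up with 4
 * references (1 from setup plus 3 added) and cannot be freed while any
 * queue might still run it.
 */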

static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
{
	struct nicvf *nic = netdev_priv(netdev);

	/* To avoid checks while retrieving buffer address from CQE_RX,
	 * do not support XDP for T88 pass1.x silicons which are anyway
	 * not in use widely.
	 */
	if (pass1_silicon(nic->pdev))
		return -EOPNOTSUPP;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nicvf_xdp_setup(nic, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!nic->xdp_prog;
		xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
	.ndo_xdp		= nicvf_xdp,
};

static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;
	u16 sdevid;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!nic->drv_stats) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	/* Get iommu domain for iova to physical addr conversion */
	nic->iommu_domain = iommu_get_domain_for_dev(dev);

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
			       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	/* MTU range: 64 - 9200 */
	netdev->min_mtu = NIC_HW_MIN_FRS;
	netdev->max_mtu = NIC_HW_MAX_FRS;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}

static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}

static struct pci_driver nicvf_driver = {
	.name		= DRV_NAME,
	.id_table	= nicvf_id_table,
	.probe		= nicvf_probe,
	.remove		= nicvf_remove,
	.shutdown	= nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);