/* Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"
/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
                          u64 reg, int bit_pos, int bits, int val)
{
        u64 bit_mask;
        u64 reg_val;
        int timeout = 10;

        bit_mask = (1ULL << bits) - 1;
        bit_mask = (bit_mask << bit_pos);

        while (timeout) {
                reg_val = nicvf_queue_reg_read(nic, reg, qidx);
                if (((reg_val & bit_mask) >> bit_pos) == val)
                        return 0;
                usleep_range(1000, 2000);
                timeout--;
        }
        netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
        return 1;
}
/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
                                  int q_len, int desc_size, int align_bytes)
{
        dmem->q_len = q_len;
        dmem->size = (desc_size * q_len) + align_bytes;
        /* Save address, need it while freeing */
        dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
                                                 &dmem->dma, GFP_KERNEL);
        if (!dmem->unalign_base)
                return -ENOMEM;

        /* Align memory address for 'align_bytes' */
        dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
        dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
        return 0;
}
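/* Alignment sketch (illustrative, assuming NICVF_ALIGNED_ADDR() rounds an
 * address up to the next 'align_bytes' boundary): with align_bytes = 128 and
 * a DMA handle of 0x10000040, phys_base becomes 0x10000080 and the CPU
 * pointer 'base' is advanced by the same 0x40, so the CPU and device views
 * of descriptor 0 stay in sync.  The extra 'align_bytes' added to 'size'
 * guarantees the rounded-up region still fits inside the allocation.
 */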
/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
        if (!dmem)
                return;

        dma_free_coherent(&nic->pdev->dev, dmem->size,
                          dmem->unalign_base, dmem->dma);
        dmem->unalign_base = NULL;
        dmem->base = NULL;
}
/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
                                         u32 buf_len, u64 **rbuf)
{
        int order = get_order(buf_len);

        /* Check if request can be accommodated in previous allocated page */
        if (nic->rb_page) {
                if ((nic->rb_page_offset + buf_len + buf_len) >
                    (PAGE_SIZE << order)) {
                        nic->rb_page = NULL;
                } else {
                        nic->rb_page_offset += buf_len;
                        get_page(nic->rb_page);
                }
        }

        /* Allocate a new page */
        if (!nic->rb_page) {
                nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
                                           order);
                if (!nic->rb_page) {
                        netdev_err(nic->netdev,
                                   "Failed to allocate new rcv buffer\n");
                        return -ENOMEM;
                }
                nic->rb_page_offset = 0;
        }

        *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);

        return 0;
}
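/* Page-reuse sketch (illustrative, with a 2 KB fragment on a 4 KB page): the
 * first allocation takes a fresh page at offset 0; the second reuses it at
 * offset 2048, because the (offset + 2 * buf_len) check shows the fragment at
 * the advanced offset still fits, and get_page() bumps the refcount; the
 * third request fails that check and allocates a new page.  Each fragment
 * therefore holds its own page reference and the page is only freed once
 * every buffer carved from it has been put back.
 */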
/* Build skb around receive buffer */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
                                           u64 rb_ptr, int len)
{
        void *data;
        struct sk_buff *skb;

        data = phys_to_virt(rb_ptr);

        /* Now build an skb to give to stack */
        skb = build_skb(data, RCV_FRAG_LEN);
        if (!skb) {
                put_page(virt_to_page(data));
                return NULL;
        }

        prefetch(skb->data);
        return skb;
}
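/* build_skb() wraps the existing receive buffer rather than copying it, so
 * the per-packet cost here is only the skb metadata allocation; on failure
 * the page reference taken for this fragment is dropped so the buffer is
 * not leaked.
 */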
/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
                           int ring_len, int buf_size)
{
        int idx;
        u64 *rbuf;
        struct rbdr_entry_t *desc;
        int err;

        err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
                                     sizeof(struct rbdr_entry_t),
                                     NICVF_RCV_BUF_ALIGN_BYTES);
        if (err)
                return err;

        rbdr->desc = rbdr->dmem.base;
        /* Buffer size has to be in multiples of 128 bytes */
        rbdr->dma_size = buf_size;
        rbdr->enable = true;
        rbdr->thresh = RBDR_THRESH;

        nic->rb_page = NULL;
        for (idx = 0; idx < ring_len; idx++) {
                err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
                                             &rbuf);
                if (err)
                        return err;

                desc = GET_RBDR_DESC(rbdr, idx);
                desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
        }
        return 0;
}
/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
        int head, tail;
        u64 buf_addr;
        struct rbdr_entry_t *desc;

        if (!rbdr)
                return;

        rbdr->enable = false;
        if (!rbdr->dmem.base)
                return;

        head = rbdr->head;
        tail = rbdr->tail;

        /* Free the buffers held between head and tail */
        while (head != tail) {
                desc = GET_RBDR_DESC(rbdr, head);
                buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
                put_page(virt_to_page(phys_to_virt(buf_addr)));
                head++;
                head &= (rbdr->dmem.q_len - 1);
        }
        /* Free SKB of tail desc */
        desc = GET_RBDR_DESC(rbdr, tail);
        buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
        put_page(virt_to_page(phys_to_virt(buf_addr)));

        /* Free descriptor ring */
        nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
/* Refill receive buffer descriptors with new buffers.
 */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
        struct queue_set *qs = nic->qs;
        int rbdr_idx = qs->rbdr_cnt;
        int tail, qcount;
        int refill_rb_cnt;
        struct rbdr *rbdr;
        struct rbdr_entry_t *desc;
        u64 *rbuf;
        int new_rb = 0;

refill:
        if (!rbdr_idx)
                return;
        rbdr_idx--;
        rbdr = &qs->rbdr[rbdr_idx];
        /* Check if it's enabled */
        if (!rbdr->enable)
                goto next_rbdr;

        /* Get no of desc's to be refilled */
        qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
        qcount &= 0x7FFFF;
        /* Doorbell can be ringed with a max of ring size minus 1 */
        if (qcount >= (qs->rbdr_len - 1))
                goto next_rbdr;
        else
                refill_rb_cnt = qs->rbdr_len - qcount - 1;

        /* Start filling descs from tail */
        tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
        while (refill_rb_cnt) {
                tail++;
                tail &= (rbdr->dmem.q_len - 1);

                if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
                        break;

                desc = GET_RBDR_DESC(rbdr, tail);
                desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
                refill_rb_cnt--;
                new_rb++;
        }

        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();

        /* Check if buffer allocation failed */
        if (refill_rb_cnt)
                nic->rb_alloc_fail = true;
        else
                nic->rb_alloc_fail = false;

        /* Notify HW */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
                              rbdr_idx, new_rb);
next_rbdr:
        /* Re-enable RBDR interrupts only if buffer allocation is success */
        if (!nic->rb_alloc_fail && rbdr->enable)
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

        if (rbdr_idx)
                goto refill;
}
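/* Doorbell arithmetic (illustrative): with an 8192-entry ring and STATUS0
 * reporting 8000 descriptors still owned by hardware, at most
 * 8192 - 8000 - 1 = 191 buffers are posted in one pass; keeping one slot
 * free presumably prevents a completely full ring from looking identical to
 * an empty one.  The DOOR write then tells hardware how many descriptors
 * were just added at the tail.
 */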
/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
        struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

        nicvf_refill_rbdr(nic, GFP_KERNEL);
        if (nic->rb_alloc_fail)
                schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
        else
                nic->rb_work_scheduled = false;
}
/* In Softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
        struct nicvf *nic = (struct nicvf *)data;

        nicvf_refill_rbdr(nic, GFP_ATOMIC);
        if (nic->rb_alloc_fail) {
                nic->rb_work_scheduled = true;
                schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
        }
}
/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
                                struct cmp_queue *cq, int q_len)
{
        int err;

        err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
                                     NICVF_CQ_BASE_ALIGN_BYTES);
        if (err)
                return err;

        cq->desc = cq->dmem.base;
        cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
        nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

        return 0;
}
static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
        if (!cq)
                return;
        if (!cq->dmem.base)
                return;

        nicvf_free_q_desc_mem(nic, &cq->dmem);
}
/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
                                struct snd_queue *sq, int q_len)
{
        int err;

        err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
                                     NICVF_SQ_BASE_ALIGN_BYTES);
        if (err)
                return err;

        sq->desc = sq->dmem.base;
        sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
        if (!sq->skbuff)
                return -ENOMEM;
        sq->head = 0;
        sq->tail = 0;
        atomic_set(&sq->free_cnt, q_len - 1);
        sq->thresh = SND_QUEUE_THRESH;

        /* Preallocate memory for TSO segment's header */
        sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
                                          q_len * TSO_HEADER_SIZE,
                                          &sq->tso_hdrs_phys, GFP_KERNEL);
        if (!sq->tso_hdrs)
                return -ENOMEM;

        return 0;
}
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
        if (!sq)
                return;
        if (!sq->dmem.base)
                return;

        if (sq->tso_hdrs)
                dma_free_coherent(&nic->pdev->dev,
                                  sq->dmem.q_len * TSO_HEADER_SIZE,
                                  sq->tso_hdrs, sq->tso_hdrs_phys);

        kfree(sq->skbuff);
        nicvf_free_q_desc_mem(nic, &sq->dmem);
}
static void nicvf_reclaim_snd_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
{
        /* Disable send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
        /* Check if SQ is stopped */
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
                return;
        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}
static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
{
        union nic_mbx mbx = {};

        /* Make sure all packets in the pipeline are written back into mem */
        mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
        nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
                                    struct queue_set *qs, int qidx)
{
        /* Disable timer threshold (doesn't get reset upon CQ reset) */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
        /* Disable completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}
static void nicvf_reclaim_rbdr(struct nicvf *nic,
                               struct rbdr *rbdr, int qidx)
{
        u64 tmp, fifo_state;
        int timeout = 10;

        /* Save head and tail pointers for freeing up buffers */
        rbdr->head = nicvf_queue_reg_read(nic,
                                          NIC_QSET_RBDR_0_1_HEAD,
                                          qidx) >> 3;
        rbdr->tail = nicvf_queue_reg_read(nic,
                                          NIC_QSET_RBDR_0_1_TAIL,
                                          qidx) >> 3;

        /* If RBDR FIFO is in 'FAIL' state then do a reset first
         * before reclaiming.
         */
        fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
        if (((fifo_state >> 62) & 0x03) == 0x3)
                nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                                      qidx, NICVF_RBDR_RESET);

        /* Disable RBDR and wait for it to go idle */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
                return;
        while (1) {
                tmp = nicvf_queue_reg_read(nic,
                                           NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
                                           qidx);
                if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
                        break;
                usleep_range(1000, 2000);
                timeout--;
                if (!timeout) {
                        netdev_err(nic->netdev,
                                   "Failed polling on prefetch status\n");
                        return;
                }
        }
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                              qidx, NICVF_RBDR_RESET);

        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
                return;
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
        if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
                return;
}
void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
{
        u64 rq_cfg;
        int sqs;

        rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);

        /* Enable first VLAN stripping */
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                rq_cfg |= (1ULL << 25);
        else
                rq_cfg &= ~(1ULL << 25);
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);

        /* Configure Secondary Qsets, if any */
        for (sqs = 0; sqs < nic->sqs_count; sqs++)
                if (nic->snicvf[sqs])
                        nicvf_queue_reg_write(nic->snicvf[sqs],
                                              NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
}
/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
                                   int qidx, bool enable)
{
        union nic_mbx mbx = {};
        struct rcv_queue *rq;
        struct rq_cfg rq_cfg;

        rq = &qs->rq[qidx];
        rq->enable = enable;

        /* Disable receive queue */
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

        if (!rq->enable) {
                nicvf_reclaim_rcv_queue(nic, qs, qidx);
                return;
        }

        rq->cq_qs = qs->vnic_id;
        rq->cq_idx = qidx;
        rq->start_rbdr_qs = qs->vnic_id;
        rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
        rq->cont_rbdr_qs = qs->vnic_id;
        rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
        /* all writes of RBDR data to be loaded into L2 Cache as well */
        rq->caching = 1;

        /* Send a mailbox msg to PF to config RQ */
        mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
        mbx.rq.qs_num = qs->vnic_id;
        mbx.rq.rq_num = qidx;
        mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
                     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
                     (rq->cont_qs_rbdr_idx << 8) |
                     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
        nicvf_send_msg_to_pf(nic, &mbx);

        mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
        mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
        nicvf_send_msg_to_pf(nic, &mbx);

        /* RQ drop config
         * Enable CQ drop to reserve sufficient CQEs for all tx packets
         */
        mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
        mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
        nicvf_send_msg_to_pf(nic, &mbx);

        nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
        if (!nic->sqs_mode)
                nicvf_config_vlan_stripping(nic, nic->netdev->features);

        /* Enable Receive queue */
        rq_cfg.ena = 1;
        rq_cfg.tcp_ena = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}
/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
                            int qidx, bool enable)
{
        struct cmp_queue *cq;
        struct cq_cfg cq_cfg;

        cq = &qs->cq[qidx];
        cq->enable = enable;

        if (!cq->enable) {
                nicvf_reclaim_cmp_queue(nic, qs, qidx);
                return;
        }

        /* Reset completion queue */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

        spin_lock_init(&cq->lock);
        /* Set completion queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
                              qidx, (u64)(cq->dmem.phys_base));

        /* Enable Completion queue */
        cq_cfg.ena = 1;
        cq_cfg.reset = 0;
        cq_cfg.caching = 0;
        cq_cfg.qsize = CMP_QSIZE;
        cq_cfg.avg_con = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
                              qidx, CMP_QUEUE_TIMER_THRESH);
}
/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
                                   int qidx, bool enable)
{
        union nic_mbx mbx = {};
        struct snd_queue *sq;
        struct sq_cfg sq_cfg;

        sq = &qs->sq[qidx];
        sq->enable = enable;

        if (!sq->enable) {
                nicvf_reclaim_snd_queue(nic, qs, qidx);
                return;
        }

        /* Reset send queue */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

        sq->cq_qs = qs->vnic_id;
        sq->cq_idx = qidx;

        /* Send a mailbox msg to PF to config SQ */
        mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
        mbx.sq.qs_num = qs->vnic_id;
        mbx.sq.sq_num = qidx;
        mbx.sq.sqs_mode = nic->sqs_mode;
        mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
        nicvf_send_msg_to_pf(nic, &mbx);

        /* Set queue base address */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
                              qidx, (u64)(sq->dmem.phys_base));

        /* Enable send queue & set queue size */
        sq_cfg.ena = 1;
        sq_cfg.reset = 0;
        sq_cfg.ldwb = 0;
        sq_cfg.qsize = SND_QSIZE;
        sq_cfg.tstmp_bgx_intf = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

        /* Set queue:cpu affinity for better load distribution */
        if (cpu_online(qidx)) {
                cpumask_set_cpu(qidx, &sq->affinity_mask);
                netif_set_xps_queue(nic->netdev,
                                    &sq->affinity_mask, qidx);
        }
}
/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
                              int qidx, bool enable)
{
        struct rbdr *rbdr;
        struct rbdr_cfg rbdr_cfg;

        rbdr = &qs->rbdr[qidx];
        nicvf_reclaim_rbdr(nic, rbdr, qidx);
        if (!enable)
                return;

        /* Set descriptor base address */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
                              qidx, (u64)(rbdr->dmem.phys_base));

        /* Enable RBDR & set queue size */
        /* Buffer size should be in multiples of 128 bytes */
        rbdr_cfg.ena = 1;
        rbdr_cfg.reset = 0;
        rbdr_cfg.ldwb = 0;
        rbdr_cfg.qsize = RBDR_SIZE;
        rbdr_cfg.avg_con = 0;
        rbdr_cfg.lines = rbdr->dma_size / 128;
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
                              qidx, *(u64 *)&rbdr_cfg);

        /* Notify HW */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
                              qidx, qs->rbdr_len - 1);

        /* Set threshold value for interrupt generation */
        nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
                              qidx, rbdr->thresh - 1);
}
/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
        union nic_mbx mbx = {};
        struct queue_set *qs = nic->qs;
        struct qs_cfg *qs_cfg;

        if (!qs) {
                netdev_warn(nic->netdev,
                            "Qset is still not allocated, don't init queues\n");
                return;
        }

        qs->enable = enable;
        qs->vnic_id = nic->vf_id;

        /* Send a mailbox msg to PF to config Qset */
        mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
        mbx.qs.num = qs->vnic_id;
        mbx.qs.sqs_count = nic->sqs_count;

        mbx.qs.cfg = 0;
        qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
        if (qs->enable) {
                qs_cfg->ena = 1;
#ifdef __BIG_ENDIAN
                qs_cfg->be = 1;
#endif
                qs_cfg->vnic = qs->vnic_id;
        }
        nicvf_send_msg_to_pf(nic, &mbx);
}
static void nicvf_free_resources(struct nicvf *nic)
{
        int qidx;
        struct queue_set *qs = nic->qs;

        /* Free receive buffer descriptor ring */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

        /* Free completion queue */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

        /* Free send queue */
        for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}
static int nicvf_alloc_resources(struct nicvf *nic)
{
        int qidx;
        struct queue_set *qs = nic->qs;

        /* Alloc receive buffer descriptor ring */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
                if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
                                    DMA_BUFFER_LEN))
                        goto alloc_fail;
        }

        /* Alloc send queue */
        for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
                if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
                        goto alloc_fail;
        }

        /* Alloc completion queue */
        for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
                if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
                        goto alloc_fail;
        }

        return 0;
alloc_fail:
        nicvf_free_resources(nic);
        return -ENOMEM;
}
int nicvf_set_qset_resources(struct nicvf *nic)
{
        struct queue_set *qs;

        qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
        if (!qs)
                return -ENOMEM;
        nic->qs = qs;

        /* Set count of each queue */
        qs->rbdr_cnt = RBDR_CNT;
        qs->rq_cnt = RCV_QUEUE_CNT;
        qs->sq_cnt = SND_QUEUE_CNT;
        qs->cq_cnt = CMP_QUEUE_CNT;

        /* Set queue lengths */
        qs->rbdr_len = RCV_BUF_COUNT;
        qs->sq_len = SND_QUEUE_LEN;
        qs->cq_len = CMP_QUEUE_LEN;

        nic->rx_queues = qs->rq_cnt;
        nic->tx_queues = qs->sq_cnt;

        return 0;
}
int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
        bool disable = false;
        struct queue_set *qs = nic->qs;
        int qidx;

        if (!qs)
                return 0;

        if (enable) {
                if (nicvf_alloc_resources(nic))
                        return -ENOMEM;

                for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                        nicvf_snd_queue_config(nic, qs, qidx, enable);
                for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                        nicvf_cmp_queue_config(nic, qs, qidx, enable);
                for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                        nicvf_rbdr_config(nic, qs, qidx, enable);
                for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                        nicvf_rcv_queue_config(nic, qs, qidx, enable);
        } else {
                for (qidx = 0; qidx < qs->rq_cnt; qidx++)
                        nicvf_rcv_queue_config(nic, qs, qidx, disable);
                for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                        nicvf_rbdr_config(nic, qs, qidx, disable);
                for (qidx = 0; qidx < qs->sq_cnt; qidx++)
                        nicvf_snd_queue_config(nic, qs, qidx, disable);
                for (qidx = 0; qidx < qs->cq_cnt; qidx++)
                        nicvf_cmp_queue_config(nic, qs, qidx, disable);

                nicvf_free_resources(nic);
        }

        return 0;
}
/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
        int qentry;

        qentry = sq->tail;
        atomic_sub(desc_cnt, &sq->free_cnt);
        sq->tail += desc_cnt;
        sq->tail &= (sq->dmem.q_len - 1);

        return qentry;
}
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
        atomic_add(desc_cnt, &sq->free_cnt);
        sq->head += desc_cnt;
        sq->head &= (sq->dmem.q_len - 1);
}
static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
        qentry++;
        qentry &= (sq->dmem.q_len - 1);
        return qentry;
}
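/* The SQ length is a power of two, so wrapping an index is a single mask:
 * with q_len = 1024, entry 1023 advances to (1024 & 1023) = 0.  free_cnt is
 * initialised to q_len - 1, presumably so that head == tail can always be
 * read as "ring empty" rather than "ring full".
 */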
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
        u64 sq_cfg;

        sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
        sq_cfg |= NICVF_SQ_EN;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
        /* Ring doorbell so that H/W restarts processing SQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}
void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
        u64 sq_cfg;

        sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
        sq_cfg &= ~NICVF_SQ_EN;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}
void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
                              int qidx)
{
        u64 head, tail;
        struct sk_buff *skb;
        struct nicvf *nic = netdev_priv(netdev);
        struct sq_hdr_subdesc *hdr;

        head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
        tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
        while (sq->head != head) {
                hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
                if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
                        nicvf_put_sq_desc(sq, 1);
                        continue;
                }
                skb = (struct sk_buff *)sq->skbuff[sq->head];
                if (skb)
                        dev_kfree_skb_any(skb);
                atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
                atomic64_add(hdr->tot_len,
                             (atomic64_t *)&netdev->stats.tx_bytes);
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
        }
}
/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
        struct skb_shared_info *sh = skb_shinfo(skb);
        unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        unsigned int data_len = skb->len - sh_len;
        unsigned int p_len = sh->gso_size;
        long f_id = -1;                           /* id of the current fragment */
        long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
        long f_used = 0;                          /* bytes used from the current fragment */
        long n;                                   /* size of the current piece of payload */
        int num_edescs = 0;
        int segment;

        for (segment = 0; segment < sh->gso_segs; segment++) {
                unsigned int p_used = 0;

                /* One edesc for header and for each piece of the payload. */
                for (num_edescs++; p_used < p_len; num_edescs++) {
                        /* Advance as needed. */
                        while (f_used >= f_size) {
                                f_id++;
                                f_size = skb_frag_size(&sh->frags[f_id]);
                                f_used = 0;
                        }

                        /* Use bytes from the current fragment. */
                        n = p_len - p_used;
                        if (n > f_size - f_used)
                                n = f_size - f_used;
                        f_used += n;
                        p_used += n;
                }

                /* The last segment may be less than gso_size. */
                data_len -= p_len;
                if (data_len < p_len)
                        p_len = data_len;
        }

        /* '+ gso_segs' for SQ_HDR_SUDESCs for each segment */
        return num_edescs + sh->gso_segs;
}
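/* Worked example (illustrative): a TSO skb with gso_segs = 3 whose payload
 * for each segment straddles two page fragments accumulates 3 * (1 + 2) = 9
 * edescs in the loop above; adding gso_segs for the per-segment HDR
 * subdescriptors gives 9 + 3 = 12 SQ entries in total.
 */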
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
        int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

        if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
                subdesc_cnt = nicvf_tso_count_subdescs(skb);
                return subdesc_cnt;
        }

        if (skb_shinfo(skb)->nr_frags)
                subdesc_cnt += skb_shinfo(skb)->nr_frags;

        return subdesc_cnt;
}
/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
                         int subdesc_cnt, struct sk_buff *skb, int len)
{
        int proto;
        struct sq_hdr_subdesc *hdr;

        hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
        sq->skbuff[qentry] = (u64)skb;

        memset(hdr, 0, SND_QUEUE_DESC_SIZE);
        hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
        /* Enable notification via CQE after processing SQE */
        hdr->post_cqe = 1;
        /* No of subdescriptors following this */
        hdr->subdesc_cnt = subdesc_cnt;
        hdr->tot_len = len;

        /* Offload checksum calculation to HW */
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->csum_l3 = 1; /* Enable IP csum calculation */
                hdr->l3_offset = skb_network_offset(skb);
                hdr->l4_offset = skb_transport_offset(skb);

                proto = ip_hdr(skb)->protocol;
                switch (proto) {
                case IPPROTO_TCP:
                        hdr->csum_l4 = SEND_L4_CSUM_TCP;
                        break;
                case IPPROTO_UDP:
                        hdr->csum_l4 = SEND_L4_CSUM_UDP;
                        break;
                case IPPROTO_SCTP:
                        hdr->csum_l4 = SEND_L4_CSUM_SCTP;
                        break;
                }
        }

        if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
                hdr->tso = 1;
                hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
                hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
                /* For non-tunneled pkts, point this to L2 ethertype */
                hdr->inner_l3_offset = skb_network_offset(skb) - 2;
                nic->drv_stats.tx_tso++;
        }
}
/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
                                               int size, u64 data)
{
        struct sq_gather_subdesc *gather;

        qentry &= (sq->dmem.q_len - 1);
        gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

        memset(gather, 0, SND_QUEUE_DESC_SIZE);
        gather->subdesc_type = SQ_DESC_TYPE_GATHER;
        gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
        gather->size = size;
        gather->addr = data;
}
/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
                               int sq_num, int qentry, struct sk_buff *skb)
{
        struct tso_t tso;
        int seg_subdescs = 0, desc_cnt = 0;
        int seg_len, total_len, data_left;
        int hdr_qentry = qentry;
        int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

        tso_start(skb, &tso);
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
                char *hdr;

                /* Save Qentry for adding HDR_SUBDESC at the end */
                hdr_qentry = qentry;

                data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
                total_len -= data_left;

                /* Add segment's header */
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
                tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
                nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
                                            sq->tso_hdrs_phys +
                                            qentry * TSO_HEADER_SIZE);
                /* HDR_SUDESC + GATHER */
                seg_subdescs = 2;
                seg_len = hdr_len;

                /* Add segment's payload fragments */
                while (data_left > 0) {
                        int size;

                        size = min_t(int, tso.size, data_left);

                        qentry = nicvf_get_nxt_sqentry(sq, qentry);
                        nicvf_sq_add_gather_subdesc(sq, qentry, size,
                                                    virt_to_phys(tso.data));

                        seg_subdescs++;
                        seg_len += size;

                        data_left -= size;
                        tso_build_data(skb, &tso, size);
                }
                nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
                                         seg_subdescs - 1, skb, seg_len);
                sq->skbuff[hdr_qentry] = (u64)NULL;
                qentry = nicvf_get_nxt_sqentry(sq, qentry);

                desc_cnt += seg_subdescs;
        }
        /* Save SKB in the last segment for freeing */
        sq->skbuff[hdr_qentry] = (u64)skb;

        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();

        /* Inform HW to xmit all TSO segments */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
                              sq_num, desc_cnt);
        nic->drv_stats.tx_tso++;
        return 1;
}
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
        int i, size;
        int subdesc_cnt;
        int sq_num, qentry;
        struct queue_set *qs;
        struct snd_queue *sq;

        sq_num = skb_get_queue_mapping(skb);
        if (sq_num >= MAX_SND_QUEUES_PER_QS) {
                /* Get secondary Qset's SQ structure */
                i = sq_num / MAX_SND_QUEUES_PER_QS;
                if (!nic->snicvf[i - 1]) {
                        netdev_warn(nic->netdev,
                                    "Secondary Qset#%d's ptr not initialized\n",
                                    i - 1);
                        return 0;
                }
                nic = (struct nicvf *)nic->snicvf[i - 1];
                sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
        }

        qs = nic->qs;
        sq = &qs->sq[sq_num];

        subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
        if (subdesc_cnt > atomic_read(&sq->free_cnt))
                goto append_fail;

        qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

        /* Check if its a TSO packet */
        if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
                return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

        /* Add SQ header subdesc */
        nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
                                 skb, skb->len);

        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
        size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
        nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

        /* Check for scattered buffer */
        if (!skb_is_nonlinear(skb))
                goto doorbell;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag;

                frag = &skb_shinfo(skb)->frags[i];

                qentry = nicvf_get_nxt_sqentry(sq, qentry);
                size = skb_frag_size(frag);
                nicvf_sq_add_gather_subdesc(sq, qentry, size,
                                            virt_to_phys(
                                            skb_frag_address(frag)));
        }

doorbell:
        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();

        /* Inform HW to xmit new packet */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
                              sq_num, subdesc_cnt);
        return 1;

append_fail:
        /* Use original PCI dev for debug log */
        nic = nic->pnicvf;
        netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
        return 0;
}
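/* Subdescriptor layout for a typical non-TSO skb with two page fragments
 * (derived from the code above):
 *
 *   HDR(subdesc_cnt=3) | GATHER(linear) | GATHER(frag 0) | GATHER(frag 1)
 *
 * MIN_SQ_DESC_PER_PKT_XMIT covers the header plus the first gather entry,
 * and each additional fragment contributes one more gather subdescriptor.
 */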
static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
        return (i & ~3) + 3 - (i & 3);
#else
        return i;
#endif
}
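/* On big-endian builds the 16-bit buffer lengths in the CQE appear in the
 * opposite order within each 64-bit word, so frag_num() swizzles the index
 * within every group of four: 0,1,2,3 map to 3,2,1,0 and 4..7 map to 7..4.
 * On little-endian builds the index is used as-is.
 */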
/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
        int frag;
        int payload_len = 0;
        struct sk_buff *skb = NULL;
        struct sk_buff *skb_frag = NULL;
        struct sk_buff *prev_frag = NULL;
        u16 *rb_lens = NULL;
        u64 *rb_ptrs = NULL;

        rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
        rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

        netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
                   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

        for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
                payload_len = rb_lens[frag_num(frag)];
                if (!frag) {
                        /* First fragment */
                        skb = nicvf_rb_ptr_to_skb(nic,
                                                  *rb_ptrs - cqe_rx->align_pad,
                                                  payload_len);
                        if (!skb)
                                return NULL;
                        skb_reserve(skb, cqe_rx->align_pad);
                        skb_put(skb, payload_len);
                } else {
                        /* Add fragments */
                        skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
                                                       payload_len);
                        if (!skb_frag) {
                                dev_kfree_skb(skb);
                                return NULL;
                        }

                        if (!skb_shinfo(skb)->frag_list)
                                skb_shinfo(skb)->frag_list = skb_frag;
                        else
                                prev_frag->next = skb_frag;

                        prev_frag = skb_frag;
                        skb->len += payload_len;
                        skb->data_len += payload_len;
                        skb_frag->len = payload_len;
                }
                /* Next buffer pointer */
                rb_ptrs++;
        }
        return skb;
}
static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
        u64 reg_val;

        switch (int_type) {
        case NICVF_INTR_CQ:
                reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
                break;
        case NICVF_INTR_SQ:
                reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
                break;
        case NICVF_INTR_RBDR:
                reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
                break;
        case NICVF_INTR_PKT_DROP:
                reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
                break;
        case NICVF_INTR_TCP_TIMER:
                reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
                break;
        case NICVF_INTR_MBOX:
                reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
                break;
        case NICVF_INTR_QS_ERR:
                reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
                break;
        default:
                reg_val = 0;
        }

        return reg_val;
}
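/* Mask sketch (illustrative; the *_SHIFT values used here are assumptions,
 * e.g. NICVF_INTR_CQ_SHIFT = 0 and NICVF_INTR_RBDR_SHIFT = 16): a CQ 2
 * interrupt would map to bit 2 and RBDR 1 to bit 17.  The same mask is
 * written to NIC_VF_ENA_W1S to enable, NIC_VF_ENA_W1C to disable and
 * NIC_VF_INT to clear the corresponding interrupt in the helpers below.
 */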
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

        if (!mask) {
                netdev_dbg(nic->netdev,
                           "Failed to enable interrupt: unknown type\n");
                return;
        }
        nicvf_reg_write(nic, NIC_VF_ENA_W1S,
                        nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}
/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

        if (!mask) {
                netdev_dbg(nic->netdev,
                           "Failed to disable interrupt: unknown type\n");
                return;
        }

        nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}
/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

        if (!mask) {
                netdev_dbg(nic->netdev,
                           "Failed to clear interrupt: unknown type\n");
                return;
        }

        nicvf_reg_write(nic, NIC_VF_INT, mask);
}
/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
        u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
        /* If interrupt type is unknown, we treat it disabled. */
        if (!mask) {
                netdev_dbg(nic->netdev,
                           "Failed to check interrupt enable: unknown type\n");
                return 0;
        }

        return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
}
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
        struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
        nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
                            (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

        rq = &nic->qs->rq[rq_idx];
        rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
        rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}
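/* Address sketch (derived from the macro above): GET_RQ_STATS(1) for
 * rq_idx = 3 reads NIC_QSET_RQ_0_7_STAT_0_1 | (3 << NIC_Q_NUM_SHIFT) |
 * (1 << 3); each queue owns its own block of 8-byte-strided counters
 * selected by 'reg'.  The SQ statistics helper below uses the same scheme.
 */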
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
        struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
        nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
                            (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

        sq = &nic->qs->sq[sq_idx];
        sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
        sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
        struct nicvf_hw_stats *stats = &nic->hw_stats;

        if (!cqe_rx->err_level && !cqe_rx->err_opcode)
                return 0;

        if (netif_msg_rx_err(nic))
                netdev_err(nic->netdev,
                           "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
                           nic->netdev->name,
                           cqe_rx->err_level, cqe_rx->err_opcode);

        switch (cqe_rx->err_opcode) {
        case CQ_RX_ERROP_RE_PARTIAL:
                stats->rx_bgx_truncated_pkts++;
                break;
        case CQ_RX_ERROP_RE_JABBER:
                stats->rx_jabber_errs++;
                break;
        case CQ_RX_ERROP_RE_FCS:
                stats->rx_fcs_errs++;
                break;
        case CQ_RX_ERROP_RE_RX_CTL:
                stats->rx_bgx_errs++;
                break;
        case CQ_RX_ERROP_PREL2_ERR:
                stats->rx_prel2_errs++;
                break;
        case CQ_RX_ERROP_L2_MAL:
                stats->rx_l2_hdr_malformed++;
                break;
        case CQ_RX_ERROP_L2_OVERSIZE:
                stats->rx_oversize++;
                break;
        case CQ_RX_ERROP_L2_UNDERSIZE:
                stats->rx_undersize++;
                break;
        case CQ_RX_ERROP_L2_LENMISM:
                stats->rx_l2_len_mismatch++;
                break;
        case CQ_RX_ERROP_L2_PCLP:
                stats->rx_l2_pclp++;
                break;
        case CQ_RX_ERROP_IP_NOT:
                stats->rx_ip_ver_errs++;
                break;
        case CQ_RX_ERROP_IP_CSUM_ERR:
                stats->rx_ip_csum_errs++;
                break;
        case CQ_RX_ERROP_IP_MAL:
                stats->rx_ip_hdr_malformed++;
                break;
        case CQ_RX_ERROP_IP_MALD:
                stats->rx_ip_payload_malformed++;
                break;
        case CQ_RX_ERROP_IP_HOP:
                stats->rx_ip_ttl_errs++;
                break;
        case CQ_RX_ERROP_L3_PCLP:
                stats->rx_l3_pclp++;
                break;
        case CQ_RX_ERROP_L4_MAL:
                stats->rx_l4_malformed++;
                break;
        case CQ_RX_ERROP_L4_CHK:
                stats->rx_l4_csum_errs++;
                break;
        case CQ_RX_ERROP_UDP_LEN:
                stats->rx_udp_len_errs++;
                break;
        case CQ_RX_ERROP_L4_PORT:
                stats->rx_l4_port_errs++;
                break;
        case CQ_RX_ERROP_TCP_FLAG:
                stats->rx_tcp_flag_errs++;
                break;
        case CQ_RX_ERROP_TCP_OFFSET:
                stats->rx_tcp_offset_errs++;
                break;
        case CQ_RX_ERROP_L4_PCLP:
                stats->rx_l4_pclp++;
                break;
        case CQ_RX_ERROP_RBDR_TRUNC:
                stats->rx_truncated_pkts++;
                break;
        default:
                break;
        }

        return 1;
}

/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
                            struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
        struct cmp_queue_stats *stats = &cq->stats;

        switch (cqe_tx->send_status) {
        case CQ_TX_ERROP_GOOD:
                stats->tx.good++;
                return 0;
        case CQ_TX_ERROP_DESC_FAULT:
                stats->tx.desc_fault++;
                break;
        case CQ_TX_ERROP_HDR_CONS_ERR:
                stats->tx.hdr_cons_err++;
                break;
        case CQ_TX_ERROP_SUBDC_ERR:
                stats->tx.subdesc_err++;
                break;
        case CQ_TX_ERROP_IMM_SIZE_OFLOW:
                stats->tx.imm_size_oflow++;
                break;
        case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
                stats->tx.data_seq_err++;
                break;
        case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
                stats->tx.mem_seq_err++;
                break;
        case CQ_TX_ERROP_LOCK_VIOL:
                stats->tx.lock_viol++;
                break;
        case CQ_TX_ERROP_DATA_FAULT:
                stats->tx.data_fault++;
                break;
        case CQ_TX_ERROP_TSTMP_CONFLICT:
                stats->tx.tstmp_conflict++;
                break;
        case CQ_TX_ERROP_TSTMP_TIMEOUT:
                stats->tx.tstmp_timeout++;
                break;
        case CQ_TX_ERROP_MEM_FAULT:
                stats->tx.mem_fault++;
                break;
        case CQ_TX_ERROP_CK_OVERLAP:
                stats->tx.csum_overlap++;
                break;
        case CQ_TX_ERROP_CK_OFLOW:
                stats->tx.csum_overflow++;