/* Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"

#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))

/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, reg, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return 0;
		usleep_range(1000, 2000);
		timeout--;
	}
	netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
	return 1;
}

/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;
	/* Save address, need it while freeing */
	dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
						 &dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;

	/* Align memory address for 'align_bytes' */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
	return 0;
}

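/* Illustrative sketch (not part of the original driver): the alignment
 * arithmetic used by nicvf_alloc_q_desc_mem() above. It assumes only that
 * NICVF_ALIGNED_ADDR() rounds an address up to the next 'align_bytes'
 * boundary; the helper below is hypothetical and exists purely to show the
 * math.
 */
#if 0	/* example only, not compiled */
static void example_desc_mem_align(struct q_desc_mem *dmem, int align_bytes)
{
	u64 pad;

	/* e.g. dma = 0x10000040, align_bytes = 512 -> phys_base = 0x10000200 */
	dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
	pad = dmem->phys_base - dmem->dma;	/* 0x1C0 in this example */
	/* applying the same pad to the CPU pointer keeps both views in sync */
	dmem->base = dmem->unalign_base + pad;
}
#endif
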
/* Free queue's descriptor memory */
static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
{
	if (!dmem)
		return;

	dma_free_coherent(&nic->pdev->dev, dmem->size,
			  dmem->unalign_base, dmem->dma);
	dmem->unalign_base = NULL;
	dmem->base = NULL;
}

/* Allocate buffer for packet reception
 * HW returns memory address where packet is DMA'ed but not a pointer
 * into RBDR ring, so save buffer address at the start of fragment and
 * align the start address to a cache aligned address
 */
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
					 u32 buf_len, u64 **rbuf)
{
	u64 data;
	struct rbuf_info *rinfo;
	int order = get_order(buf_len);

	/* Check if request can be accommodated in previously allocated page */
	if (nic->rb_page) {
		if ((nic->rb_page_offset + buf_len + buf_len) >
		    (PAGE_SIZE << order)) {
			nic->rb_page = NULL;
		} else {
			nic->rb_page_offset += buf_len;
			get_page(nic->rb_page);
		}
	}

	/* Allocate a new page */
	if (!nic->rb_page) {
		nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
		if (!nic->rb_page) {
			netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
			return -ENOMEM;
		}
		nic->rb_page_offset = 0;
	}

	data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;

	/* Align buffer addr to cache line i.e. 128 bytes */
	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
	/* Save page address for reference update */
	rinfo->page = nic->rb_page;
	/* Store start address for later retrieval */
	rinfo->data = (void *)data;
	/* Store alignment offset */
	rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);

	data += rinfo->offset;

	/* Give next aligned address to hw for DMA */
	*rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
	return 0;
}

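/* Illustrative sketch (not part of the original driver): how one receive
 * fragment set up by nicvf_alloc_rcv_buffer() above is laid out, and how
 * GET_RBUF_INFO() recovers the bookkeeping on the receive path. Only the
 * address handed to the hardware comes back in the CQE, so stepping back by
 * NICVF_RCV_BUF_ALIGN_BYTES finds the rbuf_info again.
 *
 *  data ->|-- pad to cache line --|-- rbuf_info --|-- packet data (HW addr)
 */
#if 0	/* example only, not compiled */
static void example_rbuf_roundtrip(u64 data)
{
	struct rbuf_info *rinfo;
	u64 hw_addr;

	rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
	hw_addr = (u64)rinfo + NICVF_RCV_BUF_ALIGN_BYTES;
	WARN_ON(GET_RBUF_INFO(hw_addr) != rinfo);	/* always holds */
}
#endif
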
/* Retrieve actual buffer start address and build skb for received packet */
static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
					   u64 rb_ptr, int len)
{
	struct sk_buff *skb;
	struct rbuf_info *rinfo;

	rb_ptr = (u64)phys_to_virt(rb_ptr);
	/* Get buffer start address and alignment offset */
	rinfo = GET_RBUF_INFO(rb_ptr);

	/* Now build an skb to give to stack */
	skb = build_skb(rinfo->data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(rinfo->page);
		return NULL;
	}

	/* Set correct skb->data */
	skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);

	prefetch((void *)rb_ptr);
	return skb;
}

/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 *rbuf;
	struct rbdr_entry_t *desc;
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
				     sizeof(struct rbdr_entry_t),
				     NICVF_RCV_BUF_ALIGN_BYTES);
	if (err)
		return err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;

	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
					     &rbuf);
		if (err)
			return err;

		desc = GET_RBDR_DESC(rbdr, idx);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
	}
	return 0;
}

/* Free RBDR ring and its receive buffers */
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
	int head, tail;
	u64 buf_addr;
	struct rbdr_entry_t *desc;
	struct rbuf_info *rinfo;

	if (!rbdr)
		return;

	rbdr->enable = false;
	if (!rbdr->dmem.base)
		return;

	head = rbdr->head;
	tail = rbdr->tail;

	/* Free the buffers held by the ring */
	while (head != tail) {
		desc = GET_RBDR_DESC(rbdr, head);
		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
		rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
		put_page(rinfo->page);
		head++;
		head &= (rbdr->dmem.q_len - 1);
	}
	/* Free SKB of tail desc */
	desc = GET_RBDR_DESC(rbdr, tail);
	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
	rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
	put_page(rinfo->page);

	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}

/* Refill receive buffer descriptors with new buffers.
 */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 *rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;
	/* Doorbell can be rung with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;

	/* Start filling descs from tail */
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
	while (refill_rb_cnt) {
		tail++;
		tail &= (rbdr->dmem.q_len - 1);

		if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
			break;

		desc = GET_RBDR_DESC(rbdr, tail);
		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
		refill_rb_cnt--;
		new_rb++;
	}

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Check if buffer allocation failed */
	if (refill_rb_cnt)
		nic->rb_alloc_fail = true;
	else
		nic->rb_alloc_fail = false;

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      rbdr_idx, new_rb);
next_rbdr:
	/* Re-enable RBDR interrupts only if buffer allocation succeeded */
	if (!nic->rb_alloc_fail && rbdr->enable)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);

	if (rbdr_idx)
		goto refill;
}

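/* Illustrative sketch (not part of the original driver): the refill count
 * computed by nicvf_refill_rbdr() above. The RBDR doorbell may be rung with
 * at most (ring size - 1) descriptors, hence the extra "- 1". The numbers
 * below are made up purely for the example.
 */
#if 0	/* example only, not compiled */
static int example_refill_count(struct queue_set *qs, int qcount)
{
	/* e.g. rbdr_len = 8192, qcount = 8000 still filled -> refill 191 */
	if (qcount >= (qs->rbdr_len - 1))
		return 0;			/* ring already (nearly) full */
	return qs->rbdr_len - qcount - 1;
}
#endif
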
/* Alloc rcv buffers in non-atomic mode for better success */
void nicvf_rbdr_work(struct work_struct *work)
{
	struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);

	nicvf_refill_rbdr(nic, GFP_KERNEL);
	if (nic->rb_alloc_fail)
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	else
		nic->rb_work_scheduled = false;
}

/* In softirq context, alloc rcv buffers in atomic mode */
void nicvf_rbdr_task(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;

	nicvf_refill_rbdr(nic, GFP_ATOMIC);
	if (nic->rb_alloc_fail) {
		nic->rb_work_scheduled = true;
		schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
	}
}

/* Initialize completion queue */
static int nicvf_init_cmp_queue(struct nicvf *nic,
				struct cmp_queue *cq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
				     NICVF_CQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	cq->desc = cq->dmem.base;
	cq->thresh = CMP_QUEUE_CQE_THRESH;
	nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;

	return 0;
}

static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
{
	if (!cq)
		return;
	if (!cq->dmem.base)
		return;

	nicvf_free_q_desc_mem(nic, &cq->dmem);
}

/* Initialize transmit queue */
static int nicvf_init_snd_queue(struct nicvf *nic,
				struct snd_queue *sq, int q_len)
{
	int err;

	err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
				     NICVF_SQ_BASE_ALIGN_BYTES);
	if (err)
		return err;

	sq->desc = sq->dmem.base;
	sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
	if (!sq->skbuff)
		return -ENOMEM;
	sq->head = 0;
	sq->tail = 0;
	atomic_set(&sq->free_cnt, q_len - 1);
	sq->thresh = SND_QUEUE_THRESH;

	/* Preallocate memory for TSO segment's header */
	sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
					  q_len * TSO_HEADER_SIZE,
					  &sq->tso_hdrs_phys, GFP_KERNEL);
	if (!sq->tso_hdrs)
		return -ENOMEM;

	return 0;
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
	if (!sq)
		return;
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);

	kfree(sq->skbuff);
	nicvf_free_q_desc_mem(nic, &sq->dmem);
}

static void nicvf_reclaim_snd_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
	/* Check if SQ is stopped */
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
		return;
	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
}

static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	union nic_mbx mbx = {};

	/* Make sure all packets in the pipeline are written back into mem */
	mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
				    struct queue_set *qs, int qidx)
{
	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
}

static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
	while (1) {
		tmp = nicvf_queue_reg_read(nic,
					   NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
					   qidx);
		if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
			break;
		usleep_range(1000, 2000);
		timeout--;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "Failed polling on prefetch status\n");
			return;
		}
	}
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, NICVF_RBDR_RESET);

	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return;
}

/* Configures receive queue */
static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct rcv_queue *rq;
	struct rq_cfg rq_cfg;

	rq = &qs->rq[qidx];
	rq->enable = enable;

	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);

	if (!rq->enable) {
		nicvf_reclaim_rcv_queue(nic, qs, qidx);
		return;
	}

	rq->cq_qs = qs->vnic_id;
	rq->cq_idx = qidx;
	rq->start_rbdr_qs = qs->vnic_id;
	rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
	rq->cont_rbdr_qs = qs->vnic_id;
	rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
	/* all writes of RBDR data to be loaded into L2 Cache as well */
	rq->caching = 1;

	/* Send a mailbox msg to PF to config RQ */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
	mbx.rq.qs_num = qs->vnic_id;
	mbx.rq.rq_num = qidx;
	mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
		     (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
		     (rq->cont_qs_rbdr_idx << 8) |
		     (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
	nicvf_send_msg_to_pf(nic, &mbx);

	mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
	mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
	nicvf_send_msg_to_pf(nic, &mbx);

	/* RQ drop config
	 * Enable CQ drop to reserve sufficient CQEs for all tx packets
	 */
	mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
	nicvf_send_msg_to_pf(nic, &mbx);

	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	rq_cfg.tcp_ena = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
}

/* Configures completion queue */
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable)
{
	struct cmp_queue *cq;
	struct cq_cfg cq_cfg;

	cq = &qs->cq[qidx];
	cq->enable = enable;

	if (!cq->enable) {
		nicvf_reclaim_cmp_queue(nic, qs, qidx);
		return;
	}

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);

	spin_lock_init(&cq->lock);
	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
			      qidx, (u64)(cq->dmem.phys_base));

	/* Enable Completion queue */
	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	cq_cfg.caching = 0;
	cq_cfg.qsize = CMP_QSIZE;
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
			      qidx, nic->cq_coalesce_usecs);
}

/* Configures transmit queue */
static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
				   int qidx, bool enable)
{
	union nic_mbx mbx = {};
	struct snd_queue *sq;
	struct sq_cfg sq_cfg;

	sq = &qs->sq[qidx];
	sq->enable = enable;

	if (!sq->enable) {
		nicvf_reclaim_snd_queue(nic, qs, qidx);
		return;
	}

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);

	sq->cq_qs = qs->vnic_id;
	sq->cq_idx = qidx;

	/* Send a mailbox msg to PF to config SQ */
	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
	mbx.sq.qs_num = qs->vnic_id;
	mbx.sq.sq_num = qidx;
	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
	nicvf_send_msg_to_pf(nic, &mbx);

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = SND_QSIZE;
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}

/* Configures receive buffer descriptor ring */
static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
			      int qidx, bool enable)
{
	struct rbdr *rbdr;
	struct rbdr_cfg rbdr_cfg;

	rbdr = &qs->rbdr[qidx];
	nicvf_reclaim_rbdr(nic, rbdr, qidx);
	if (!enable)
		return;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
			      qidx, (u64)(rbdr->dmem.phys_base));

	/* Enable RBDR & set queue size */
	/* Buffer size should be in multiples of 128 bytes */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = RBDR_SIZE;
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->dma_size / 128;
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
			      qidx, *(u64 *)&rbdr_cfg);

	/* Notify HW */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
			      qidx, qs->rbdr_len - 1);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
			      qidx, rbdr->thresh - 1);
}

/* Requests PF to assign and enable Qset */
void nicvf_qset_config(struct nicvf *nic, bool enable)
{
	union nic_mbx mbx = {};
	struct queue_set *qs = nic->qs;
	struct qs_cfg *qs_cfg;

	if (!qs) {
		netdev_warn(nic->netdev,
			    "Qset is still not allocated, don't init queues\n");
		return;
	}

	qs->enable = enable;
	qs->vnic_id = nic->vf_id;

	/* Send a mailbox msg to PF to config Qset */
	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
	mbx.qs.num = qs->vnic_id;

	mbx.qs.cfg = 0;
	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
	if (qs->enable) {
		qs_cfg->ena = 1;
		qs_cfg->vnic = qs->vnic_id;
	}
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_free_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Free receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_free_rbdr(nic, &qs->rbdr[qidx]);

	/* Free completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_free_cmp_queue(nic, &qs->cq[qidx]);

	/* Free send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_free_snd_queue(nic, &qs->sq[qidx]);
}

static int nicvf_alloc_resources(struct nicvf *nic)
{
	int qidx;
	struct queue_set *qs = nic->qs;

	/* Alloc receive buffer descriptor ring */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
				    DMA_BUFFER_LEN))
			goto alloc_fail;
	}

	/* Alloc send queue */
	for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
		if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
			goto alloc_fail;
	}

	/* Alloc completion queue */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
			goto alloc_fail;
	}

	return 0;
alloc_fail:
	nicvf_free_resources(nic);
	return -ENOMEM;
}

int nicvf_set_qset_resources(struct nicvf *nic)
{
	struct queue_set *qs;

	qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	nic->qs = qs;

	/* Set count of each queue */
	qs->rbdr_cnt = RBDR_CNT;
	qs->rq_cnt = RCV_QUEUE_CNT;
	qs->sq_cnt = SND_QUEUE_CNT;
	qs->cq_cnt = CMP_QUEUE_CNT;

	/* Set queue lengths */
	qs->rbdr_len = RCV_BUF_COUNT;
	qs->sq_len = SND_QUEUE_LEN;
	qs->cq_len = CMP_QUEUE_LEN;
	return 0;
}

int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
	bool disable = false;
	struct queue_set *qs = nic->qs;
	int qidx;

	if (!qs)
		return 0;

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, enable);
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, enable);
	} else {
		for (qidx = 0; qidx < qs->rq_cnt; qidx++)
			nicvf_rcv_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
			nicvf_rbdr_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->sq_cnt; qidx++)
			nicvf_snd_queue_config(nic, qs, qidx, disable);
		for (qidx = 0; qidx < qs->cq_cnt; qidx++)
			nicvf_cmp_queue_config(nic, qs, qidx, disable);

		nicvf_free_resources(nic);
	}

	return 0;
}

/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

	qentry = sq->tail;
	atomic_sub(desc_cnt, &sq->free_cnt);
	sq->tail += desc_cnt;
	sq->tail &= (sq->dmem.q_len - 1);

	return qentry;
}

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	atomic_add(desc_cnt, &sq->free_cnt);
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}

static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
{
	qentry++;
	qentry &= (sq->dmem.q_len - 1);
	return qentry;
}

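/* Illustrative sketch (not part of the original driver): the SQ index
 * helpers above rely on the descriptor count (dmem.q_len) being a power of
 * two, so "& (q_len - 1)" wraps an index exactly like "% q_len" would.
 */
#if 0	/* example only, not compiled */
static void example_sq_index_wrap(void)
{
	int q_len = 1024;			/* power-of-two ring size */
	int qentry = 1023;

	qentry++;				/* 1024 ... */
	qentry &= (q_len - 1);			/* ... wraps back to 0 */
}
#endif
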
void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg |= NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
}

void nicvf_sq_disable(struct nicvf *nic, int qidx)
{
	u64 sq_cfg;

	sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
	sq_cfg &= ~NICVF_SQ_EN;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
}

void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
			      int qidx)
{
	u64 head, tail;
	struct sk_buff *skb;
	struct nicvf *nic = netdev_priv(netdev);
	struct sq_hdr_subdesc *hdr;

	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	while (sq->head != head) {
		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
		if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
			nicvf_put_sq_desc(sq, 1);
			continue;
		}
		skb = (struct sk_buff *)sq->skbuff[sq->head];
		if (skb)
			dev_kfree_skb_any(skb);
		atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
		atomic64_add(hdr->tot_len,
			     (atomic64_t *)&netdev->stats.tx_bytes);
		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
	}
}

/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;    /* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;  /* bytes used from the current fragment */
	long n;            /* size of the current piece of payload */
	int num_edescs = 0;
	int segment;

	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}

	/* '+ gso_segs' for SQ_HDR_SUDESCs for each segment */
	return num_edescs + sh->gso_segs;
}

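/* Illustrative sketch (not part of the original driver): what the loop above
 * counts for a simple case. For a linear TSO skb with gso_segs = 3, where
 * each segment's payload comes from a single contiguous piece, every segment
 * costs one header edesc plus one payload edesc, and the final "+ gso_segs"
 * adds one SQ_HDR subdescriptor per segment. Payload that straddles skb
 * fragments costs one extra edesc per crossing.
 */
#if 0	/* example only, not compiled */
static int example_tso_subdesc_count(void)
{
	int gso_segs = 3;	/* hypothetical segment count */
	int pieces_per_seg = 1;	/* linear skb: one payload piece per segment */

	return gso_segs * (1 + pieces_per_seg) + gso_segs;	/* = 9 */
}
#endif
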
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (skb_shinfo(skb)->gso_size) {
		subdesc_cnt = nicvf_tso_count_subdescs(skb);
		return subdesc_cnt;
	}

	if (skb_shinfo(skb)->nr_frags)
		subdesc_cnt += skb_shinfo(skb)->nr_frags;

	return subdesc_cnt;
}

/* Add SQ HEADER subdescriptor.
 * First subdescriptor for every send descriptor.
 */
static inline void
nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
			 int subdesc_cnt, struct sk_buff *skb, int len)
{
	int proto;
	struct sq_hdr_subdesc *hdr;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	sq->skbuff[qentry] = (u64)skb;

	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* No of subdescriptors following this */
	hdr->subdesc_cnt = subdesc_cnt;
	hdr->tot_len = len;

	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->protocol != htons(ETH_P_IP))
			return;

		hdr->csum_l3 = 1; /* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = ip_hdr(skb)->protocol;
		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}
}

/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;

	qentry &= (sq->dmem.q_len - 1);
	gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);

	memset(gather, 0, SND_QUEUE_DESC_SIZE);
	gather->subdesc_type = SQ_DESC_TYPE_GATHER;
	gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
	gather->size = size;
	gather->addr = data;
}

/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	tso_start(skb, &tso);
	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		/* Save Qentry for adding HDR_SUBDESC at the end */
		hdr_qentry = qentry;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* Add segment's header */
		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
					    sq->tso_hdrs_phys +
					    qentry * TSO_HEADER_SIZE);
		/* HDR_SUDESC + GATHER */
		seg_subdescs = 2;
		seg_len = hdr_len;

		/* Add segment's payload fragments */
		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);

			qentry = nicvf_get_nxt_sqentry(sq, qentry);
			nicvf_sq_add_gather_subdesc(sq, qentry, size,
						    virt_to_phys(tso.data));
			seg_subdescs++;
			seg_len += size;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
		nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
					 seg_subdescs - 1, skb, seg_len);
		sq->skbuff[hdr_qentry] = (u64)NULL;
		qentry = nicvf_get_nxt_sqentry(sq, qentry);

		desc_cnt += seg_subdescs;
	}
	/* Save SKB in the last segment for freeing */
	sq->skbuff[hdr_qentry] = (u64)skb;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      skb_get_queue_mapping(skb), desc_cnt);
	return 1;
}

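/* Illustrative sketch (not part of the original driver): the subdescriptor
 * pattern nicvf_sq_append_tso() above emits for one segment, assuming that
 * segment's payload spans two pieces of the source skb:
 *
 *   SQ_HDR  (post_cqe = 1, subdesc_cnt = 3)
 *   GATHER -> prebuilt TSO header in sq->tso_hdrs
 *   GATHER -> payload piece 1
 *   GATHER -> payload piece 2
 *
 * Only the last segment's SQ_HDR slot keeps the skb pointer in sq->skbuff[],
 * so the skb is freed exactly once, when its final completion arrives.
 */
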
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
	int i, size;
	int subdesc_cnt;
	int sq_num, qentry;
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;

	sq_num = skb_get_queue_mapping(skb);
	sq = &qs->sq[sq_num];

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size)
		return nicvf_sq_append_tso(nic, sq, qentry, skb);

	/* Add SQ header subdesc */
	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);

	/* Add SQ gather subdescs */
	qentry = nicvf_get_nxt_sqentry(sq, qentry);
	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));

	/* Check for scattered buffer */
	if (!skb_is_nonlinear(skb))
		goto doorbell;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[i];

		qentry = nicvf_get_nxt_sqentry(sq, qentry);
		size = skb_frag_size(frag);
		nicvf_sq_add_gather_subdesc(sq, qentry, size,
					    virt_to_phys(
					    skb_frag_address(frag)));
	}

doorbell:
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit new packet */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, subdesc_cnt);
	return 1;

append_fail:
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}

static inline unsigned frag_num(unsigned i)
{
#ifdef __BIG_ENDIAN
	return (i & ~3) + 3 - (i & 3);
#else
	return i;
#endif
}

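/* Illustrative sketch (not part of the original driver): the index mapping
 * frag_num() above produces on big-endian kernels, where the 16-bit receive
 * buffer lengths packed into each 64-bit CQE word are read out in reverse
 * order. Indices are remapped within every group of four:
 * 0->3, 1->2, 2->1, 3->0, 4->7, 5->6, ...; little-endian uses the identity.
 */
#if 0	/* example only, not compiled */
static void example_frag_num(void)
{
	/* big-endian mapping produced by (i & ~3) + 3 - (i & 3) */
	unsigned mapped0 = frag_num(0);	/* 3 */
	unsigned mapped4 = frag_num(4);	/* 7 */
}
#endif
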
/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_frag = NULL;
	struct sk_buff *prev_frag = NULL;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));

	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		if (!frag) {
			/* First fragment */
			skb = nicvf_rb_ptr_to_skb(nic,
						  *rb_ptrs - cqe_rx->align_pad,
						  payload_len);
			if (!skb)
				return NULL;
			skb_reserve(skb, cqe_rx->align_pad);
			skb_put(skb, payload_len);
		} else {
			/* Add fragments */
			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
						       payload_len);
			if (!skb_frag) {
				dev_kfree_skb(skb);
				return NULL;
			}

			if (!skb_shinfo(skb)->frag_list)
				skb_shinfo(skb)->frag_list = skb_frag;
			else
				prev_frag->next = skb_frag;

			prev_frag = skb_frag;
			skb->len += payload_len;
			skb->data_len += payload_len;
			skb_frag->len = payload_len;
		}
		/* Next buffer pointer */
		rb_ptrs++;
	}
	return skb;
}

/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
}

/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
}

/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val = 0;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		break;
	}

	nicvf_reg_write(nic, NIC_VF_INT, reg_val);
}

/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 reg_val;
	u64 mask = 0xff;

	reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);

	switch (int_type) {
	case NICVF_INTR_CQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		mask = NICVF_INTR_PKT_DROP_MASK;
		break;
	case NICVF_INTR_TCP_TIMER:
		mask = NICVF_INTR_TCP_TIMER_MASK;
		break;
	case NICVF_INTR_MBOX:
		mask = NICVF_INTR_MBOX_MASK;
		break;
	case NICVF_INTR_QS_ERR:
		mask = NICVF_INTR_QS_ERR_MASK;
		break;
	default:
		netdev_err(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		break;
	}

	return (reg_val & mask);
}

void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
{
	struct rcv_queue *rq;

#define GET_RQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
			    (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	rq = &nic->qs->rq[rq_idx];
	rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
	rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
}

void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
{
	struct snd_queue *sq;

#define GET_SQ_STATS(reg) \
	nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
			    (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))

	sq = &nic->qs->sq[sq_idx];
	sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
	sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
}

/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
		stats->rx.errop.good++;
		return 0;
	}

	if (netif_msg_rx_err(nic))
		netdev_err(nic->netdev,
			   "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
			   nic->netdev->name,
			   cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_level) {
	case CQ_ERR_LVL_MAC:
		stats->rx.errlvl.mac_errs++;
		break;
	case CQ_ERR_LVL_L2:
		stats->rx.errlvl.l2_errs++;
		break;
	case CQ_ERR_LVL_L3:
		stats->rx.errlvl.l3_errs++;
		break;
	case CQ_ERR_LVL_L4:
		stats->rx.errlvl.l4_errs++;
		break;
	}

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		stats->rx.errop.partial_pkts++;
		break;
	case CQ_RX_ERROP_RE_JABBER:
		stats->rx.errop.jabber_errs++;
		break;
	case CQ_RX_ERROP_RE_FCS:
		stats->rx.errop.fcs_errs++;
		break;
	case CQ_RX_ERROP_RE_TERMINATE:
		stats->rx.errop.terminate_errs++;
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		stats->rx.errop.bgx_rx_errs++;
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		stats->rx.errop.prel2_errs++;
		break;
	case CQ_RX_ERROP_L2_FRAGMENT:
		stats->rx.errop.l2_frags++;
		break;
	case CQ_RX_ERROP_L2_OVERRUN:
		stats->rx.errop.l2_overruns++;
		break;
	case CQ_RX_ERROP_L2_PFCS:
		stats->rx.errop.l2_pfcs++;
		break;
	case CQ_RX_ERROP_L2_PUNY:
		stats->rx.errop.l2_puny++;
		break;
	case CQ_RX_ERROP_L2_MAL:
		stats->rx.errop.l2_hdr_malformed++;
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		stats->rx.errop.l2_oversize++;
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		stats->rx.errop.l2_undersize++;
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		stats->rx.errop.l2_len_mismatch++;
		break;
	case CQ_RX_ERROP_L2_PCLP:
		stats->rx.errop.l2_pclp++;
		break;
	case CQ_RX_ERROP_IP_NOT:
		stats->rx.errop.non_ip++;
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		stats->rx.errop.ip_csum_err++;
		break;
	case CQ_RX_ERROP_IP_MAL:
		stats->rx.errop.ip_hdr_malformed++;
		break;
	case CQ_RX_ERROP_IP_MALD:
		stats->rx.errop.ip_payload_malformed++;
		break;
	case CQ_RX_ERROP_IP_HOP:
		stats->rx.errop.ip_hop_errs++;
		break;
	case CQ_RX_ERROP_L3_ICRC:
		stats->rx.errop.l3_icrc_errs++;
		break;
	case CQ_RX_ERROP_L3_PCLP:
		stats->rx.errop.l3_pclp++;
		break;
	case CQ_RX_ERROP_L4_MAL:
		stats->rx.errop.l4_malformed++;
		break;
	case CQ_RX_ERROP_L4_CHK:
		stats->rx.errop.l4_csum_errs++;
		break;
	case CQ_RX_ERROP_UDP_LEN:
		stats->rx.errop.udp_len_err++;
		break;
	case CQ_RX_ERROP_L4_PORT:
		stats->rx.errop.bad_l4_port++;
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		stats->rx.errop.bad_tcp_flag++;
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		stats->rx.errop.tcp_offset_errs++;
		break;
	case CQ_RX_ERROP_L4_PCLP:
		stats->rx.errop.l4_pclp++;
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		stats->rx.errop.pkt_truncated++;
		break;
	}

	return 1;
}

/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
{
	struct cmp_queue_stats *stats = &cq->stats;

	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_GOOD:
		stats->tx.good++;
		return 0;
	case CQ_TX_ERROP_DESC_FAULT:
		stats->tx.desc_fault++;
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		stats->tx.hdr_cons_err++;
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		stats->tx.subdesc_err++;
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		stats->tx.imm_size_oflow++;
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		stats->tx.data_seq_err++;
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		stats->tx.mem_seq_err++;
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		stats->tx.lock_viol++;
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		stats->tx.data_fault++;
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		stats->tx.tstmp_conflict++;
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		stats->tx.tstmp_timeout++;
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		stats->tx.mem_fault++;
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		stats->tx.csum_overlap++;
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		stats->tx.csum_overflow++;
++;